Datasets: coqa

Sub-tasks: extractive-qa
Languages: English
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: crowdsourced
Tags: conversational-qa

albertvillanova committed
Commit 9df4273 (1 parent: 7315a18)

Delete loading script

Files changed (1):
coqa.py +0 -91
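
This commit removes the repository's loading script. On the Hub, deleting a loading script is typically paired with hosting the prepared data files directly (the usual Parquet conversion), so load_dataset no longer needs to execute any code from the repo. Below is a minimal sketch of loading the dataset after this change; the plain repo id "coqa" is an assumption, and the split names and fields come from the deleted script shown below.

from datasets import load_dataset

# Assumption: the dataset is published under the plain id "coqa".
# With no loading script in the repo, this reads the prepared data
# files directly instead of running coqa.py.
ds = load_dataset("coqa")

print(ds)  # expected splits, per the deleted script: "train" and "validation"
example = ds["train"][0]
print(example["story"][:100])
print(example["questions"][0])
print(example["answers"]["input_text"][0])
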
coqa.py DELETED
@@ -1,91 +0,0 @@
-"""CoQA dataset."""
-
-
-import json
-
-import datasets
-
-
-_HOMEPAGE = "https://stanfordnlp.github.io/coqa/"
-
-_CITATION = """\
-@article{reddy-etal-2019-coqa,
-    title = "{C}o{QA}: A Conversational Question Answering Challenge",
-    author = "Reddy, Siva and
-      Chen, Danqi and
-      Manning, Christopher D.",
-    journal = "Transactions of the Association for Computational Linguistics",
-    volume = "7",
-    year = "2019",
-    address = "Cambridge, MA",
-    publisher = "MIT Press",
-    url = "https://aclanthology.org/Q19-1016",
-    doi = "10.1162/tacl_a_00266",
-    pages = "249--266",
-}
-"""
-
-_DESCRIPTION = """\
-CoQA: A Conversational Question Answering Challenge
-"""
-
-_TRAIN_DATA_URL = "https://nlp.stanford.edu/data/coqa/coqa-train-v1.0.json"
-_DEV_DATA_URL = "https://nlp.stanford.edu/data/coqa/coqa-dev-v1.0.json"
-
-
-class Coqa(datasets.GeneratorBasedBuilder):
-
-    VERSION = datasets.Version("1.0.0")
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "source": datasets.Value("string"),
-                    "story": datasets.Value("string"),
-                    "questions": datasets.features.Sequence(datasets.Value("string")),
-                    "answers": datasets.features.Sequence(
-                        {
-                            "input_text": datasets.Value("string"),
-                            "answer_start": datasets.Value("int32"),
-                            "answer_end": datasets.Value("int32"),
-                        }
-                    ),
-                }
-            ),
-            homepage=_HOMEPAGE,
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        urls_to_download = {"train": _TRAIN_DATA_URL, "dev": _DEV_DATA_URL}
-        downloaded_files = dl_manager.download_and_extract(urls_to_download)
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"], "split": "train"}
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"], "split": "validation"}
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        """Yields examples."""
-        with open(filepath, encoding="utf-8") as f:
-            data = json.load(f)
-            for row in data["data"]:
-                questions = [question["input_text"] for question in row["questions"]]
-                story = row["story"]
-                source = row["source"]
-                answers_start = [answer["span_start"] for answer in row["answers"]]
-                answers_end = [answer["span_end"] for answer in row["answers"]]
-                answers = [answer["input_text"] for answer in row["answers"]]
-                yield row["id"], {
-                    "source": source,
-                    "story": story,
-                    "questions": questions,
-                    "answers": {"input_text": answers, "answer_start": answers_start, "answer_end": answers_end},
-                }
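
For reference, the deleted builder is straightforward to reproduce without the datasets library. One subtlety it encodes: datasets.features.Sequence over a dict is stored as a dict of parallel lists, which is why _generate_examples yields "answers" as three aligned lists rather than a list of dicts. A minimal standalone sketch follows, using the same public JSON URLs and field names as the script above; the helper name iter_coqa_examples is hypothetical.

import json
import urllib.request

_TRAIN_DATA_URL = "https://nlp.stanford.edu/data/coqa/coqa-train-v1.0.json"

def iter_coqa_examples(url=_TRAIN_DATA_URL):
    """Yield (id, example) pairs shaped like the deleted coqa.py builder."""
    with urllib.request.urlopen(url) as f:
        data = json.load(f)
    for row in data["data"]:
        yield row["id"], {
            "source": row["source"],
            "story": row["story"],
            "questions": [q["input_text"] for q in row["questions"]],
            # Sequence-of-dict features become a dict of parallel lists.
            "answers": {
                "input_text": [a["input_text"] for a in row["answers"]],
                "answer_start": [a["span_start"] for a in row["answers"]],
                "answer_end": [a["span_end"] for a in row["answers"]],
            },
        }

# Example: print the first story's first question/answer pair.
_id, ex = next(iter_coqa_examples())
print(ex["questions"][0], "->", ex["answers"]["input_text"][0])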