Modalities: Text
Formats: parquet
Sub-tasks: extractive-qa
Languages: Russian
Libraries: Datasets, pandas

albertvillanova committed
Commit deb870e
1 Parent(s): 92d74b2

Convert dataset to Parquet (#5)

- Convert dataset to Parquet (ba2117f9dac3d0400403c363599fa13cc585e800)
- Delete loading script (daf6abd8b8d38754d355b911010949f9747b1839)
- Delete data file (016049d0d4aaab20b5fbfa66df4e5736e4bf7c78)
- Delete data file (53529f002c6ad14239f69b41426a3f4b95b18271)
- Delete data file (9eb43e503075be7fe9be6f19dbfcb56b06a82011)
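
With the splits stored as Parquet, the Hub's generic Parquet builder replaces the custom loading script. A minimal sketch of loading the converted dataset, assuming the repository id on the Hub is sberquad and using the default config declared in the README below:

from datasets import load_dataset

# Load the Parquet-backed splits; no loading script is executed.
# "sberquad" is an assumed Hub repository id for this dataset.
dataset = load_dataset("sberquad")

print(dataset)                          # DatasetDict with train, validation, test
print(dataset["train"][0]["question"])  # first training question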

README.md CHANGED
@@ -47,8 +47,18 @@ dataset_info:
   - name: test
     num_bytes: 36397776
     num_examples: 23936
-  download_size: 10491714
+  download_size: 19770316
   dataset_size: 116002270
+configs:
+- config_name: sberquad
+  data_files:
+  - split: train
+    path: sberquad/train-*
+  - split: validation
+    path: sberquad/validation-*
+  - split: test
+    path: sberquad/test-*
+  default: true
 ---
 
 
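
The configs block added above maps each split of the sberquad config to a Parquet glob, which is what the dataset viewer and the datasets library now read. For ad-hoc inspection the shards can also be read directly with pandas over the hf:// filesystem (a sketch; the path assumes the repository id sberquad and requires huggingface_hub to be installed):

import pandas as pd

# Read one split straight from the Hub; the shard name matches the renamed
# file further down in this commit. The repository id is an assumption.
df = pd.read_parquet("hf://datasets/sberquad/sberquad/validation-00000-of-00001.parquet")
print(df.columns.tolist())  # expected: id, title, context, question, answers
print(len(df))
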
sberquad.py DELETED
@@ -1,104 +0,0 @@
-# coding=utf-8
-"""SberQUAD: Sber Question Answering Dataset."""
-
-import os
-import json
-
-import datasets
-from datasets.tasks import QuestionAnsweringExtractive
-
-
-logger = datasets.logging.get_logger(__name__)
-
-_CITATION = """\
-@article{Efimov_2020,
-    title={SberQuAD – Russian Reading Comprehension Dataset: Description and Analysis},
-    ISBN={9783030582197},
-    ISSN={1611-3349},
-    url={http://dx.doi.org/10.1007/978-3-030-58219-7_1},
-    DOI={10.1007/978-3-030-58219-7_1},
-    journal={Experimental IR Meets Multilinguality, Multimodality, and Interaction},
-    publisher={Springer International Publishing},
-    author={Efimov, Pavel and Chertok, Andrey and Boytsov, Leonid and Braslavski, Pavel},
-    year={2020},
-    pages={3–15}
-}
-"""
-
-
-_DESCRIPTION = """\
-Sber Question Answering Dataset (SberQuAD) is a reading comprehension \
-dataset, consisting of questions posed by crowdworkers on a set of Wikipedia \
-articles, where the answer to every question is a segment of text, or span, \
-from the corresponding reading passage, or the question might be unanswerable. \
-Russian original analogue presented in Sberbank Data Science Journey 2017.
-"""
-
-_URLS = {"train": os.path.join("data", "train_v1.0.json.gz"), "dev": os.path.join("data", "dev_v1.0.json.gz"), "test": os.path.join("data", "origin_test.json.gz")}
-
-
-class Sberquad(datasets.GeneratorBasedBuilder):
-    """SberQUAD: Sber Question Answering Dataset. Version 1.0."""
-
-    VERSION = datasets.Version("1.0.0")
-    BUILDER_CONFIGS = [datasets.BuilderConfig(name="sberquad", version=VERSION, description=_DESCRIPTION)]
-
-    def _info(self):
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=datasets.Features(
-                {
-                    "id": datasets.Value("int32"),
-                    "title": datasets.Value("string"),
-                    "context": datasets.Value("string"),
-                    "question": datasets.Value("string"),
-                    "answers": datasets.features.Sequence(
-                        {
-                            "text": datasets.Value("string"),
-                            "answer_start": datasets.Value("int32"),
-                        }
-                    ),
-                }
-            ),
-            supervised_keys=None,
-            homepage="",
-            citation=_CITATION,
-            task_templates=[
-                QuestionAnsweringExtractive(
-                    question_column="question", context_column="context", answers_column="answers"
-                )
-            ],
-        )
-
-    def _split_generators(self, dl_manager):
-        downloaded_files = dl_manager.download_and_extract(_URLS)
-        return [
-            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
-            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
-        ]
-
-    def _generate_examples(self, filepath):
-        """This function returns the examples in the raw (text) form."""
-        logger.info("generating examples from = %s", filepath)
-        key = 0
-        with open(filepath, encoding="utf-8") as f:
-            squad = json.load(f)
-            for article in squad["data"]:
-                title = article.get("title", "")
-                for paragraph in article["paragraphs"]:
-                    context = paragraph["context"]
-                    for qa in paragraph["qas"]:
-                        answer_starts = [answer["answer_start"] for answer in qa["answers"]]
-                        answers = [answer["text"] for answer in qa["answers"]]
-                        yield key, {
-                            "title": title,
-                            "context": context,
-                            "question": qa["question"],
-                            "id": qa["id"],
-                            "answers": {
-                                "answer_start": answer_starts,
-                                "text": answers,
-                            },
-                        }
-                        key += 1
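
The deleted builder above declared the feature schema (id, title, context, question, and an answers sequence of text plus answer_start) and parsed the SQuAD-style JSON by hand. The Parquet conversion keeps that schema, so code that indexes into the answers column should continue to work; a hedged sketch, again assuming the sberquad repository id:

from datasets import load_dataset

# The Parquet files preserve the features the old _info() method declared.
train = load_dataset("sberquad", split="train")  # assumed Hub repository id
example = train[0]

start = example["answers"]["answer_start"][0]
answer = example["answers"]["text"][0]
# In extractive QA the answer text is a span of the context starting at answer_start.
print(example["question"])
print(answer, "==", example["context"][start:start + len(answer)])
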
data/dev_v1.0.json.gz → sberquad/test-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a9145c18e890e84fd070272fcaa607a688efd61debf36b89bc2b96654ac77d18
-size 1927670
+oid sha256:6e92c4d24f2fb2d70d4046237ed8f277b4889b9a424e41eebddf97e3d62f9ac9
+size 4927641
data/train_v1.0.json.gz → sberquad/train-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5a05221fe35eb1b4fcd17b8acca1d957e12d098783d8e8a2ebb2348f6c437b0a
-size 5838123
+oid sha256:9c0ec202bab5e3a0ebfeb9d09943b88777fa2ddce4feb8f43b4c2e766f3dd095
+size 11416696
data/origin_test.json.gz → sberquad/validation-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5d24ee67e71ea95163342b72d8fb36914fa7d0b261b3552ed5824f355fe76963
-size 2725921
+oid sha256:6a2617d1212cb32e26e5a19f95a3e36a9c89fca8e60f6bcc9fe363ea78610d6f
+size 3425979