Tasks: Question Answering
Modalities: Text
Formats: parquet
Sub-tasks: open-domain-qa
Languages: Polish
Size: 1K - 10K
License: cc-by-sa-3.0
Commit b675937
Parent(s): 58819e0

Convert dataset to Parquet (#4)
- Convert dataset to Parquet (724a8bd3f91accccf5862e2fb310306ffa9cfe4e)
- Delete loading script (43f5b0d5e73467bcd60e8424ce1d160b92afc25e)

- README.md +11 -4
- data/test-00000-of-00001.parquet +3 -0
- data/train-00000-of-00001.parquet +3 -0
- dyk.py +0 -93
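
With the loading script removed, the data is served as plain Parquet shards, so the dataset loads through the standard `datasets` API with no custom code. A minimal sketch, assuming the library is installed; "ORG/dyk" is a placeholder repository id, not the real one:

from datasets import load_dataset

# Placeholder repo id -- substitute the actual Hub repository name.
ds = load_dataset("ORG/dyk")
print(ds)              # expected: DatasetDict with train (4154 rows) and test (1029 rows)
print(ds["train"][0])  # fields: q_id, question, answer, target
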
README.md
CHANGED
@@ -34,13 +34,20 @@ dataset_info:
           '1': '1'
   splits:
   - name: train
-    num_bytes:
+    num_bytes: 1388678
     num_examples: 4154
   - name: test
-    num_bytes:
+    num_bytes: 353631
     num_examples: 1029
-  download_size:
-  dataset_size:
+  download_size: 1125972
+  dataset_size: 1742309
+configs:
+- config_name: default
+  data_files:
+  - split: train
+    path: data/train-*
+  - split: test
+    path: data/test-*
 ---
 
 # Dataset Card for [Dataset Name]
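
The new `configs` block maps the default config onto the Parquet shards, which is what lets the Hub serve the dataset without a loading script. The same files can also be read through the packaged `parquet` builder; a sketch assuming a local clone of the repository as the working directory (the globs mirror the `data_files` entries above):

from datasets import load_dataset

# Resolve the split globs declared in the README front matter.
ds = load_dataset(
    "parquet",
    data_files={"train": "data/train-*.parquet", "test": "data/test-*.parquet"},
)
print(ds["train"].num_rows, ds["test"].num_rows)  # expected: 4154 1029
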
data/test-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7584de77834a94620ef7427dab14c2c7ac36de23c925e2f13d95578c9cffecd
+size 211619
data/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d7d110cdb9ae7476760406acf13f0110a3ba57861965db59a9adc0ff41a593de
+size 914353
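
Both added files are Git LFS pointers; the actual Parquet payloads (211619 and 914353 bytes) live in LFS storage and are materialized on checkout when git-lfs is installed. Any Parquet reader can then open them directly; a sketch with pandas, assuming pyarrow (or fastparquet) is available:

import pandas as pd

# Read one shard directly, without going through the datasets library.
df = pd.read_parquet("data/train-00000-of-00001.parquet")
print(df.shape)   # expected (4154, 4): q_id, question, answer, target
print(df.head())
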
dyk.py
DELETED
@@ -1,93 +0,0 @@
|
|
1 |
-
# coding=utf-8
|
2 |
-
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
|
3 |
-
#
|
4 |
-
# Licensed under the Apache License, Version 2.0 (the "License");
|
5 |
-
# you may not use this file except in compliance with the License.
|
6 |
-
# You may obtain a copy of the License at
|
7 |
-
#
|
8 |
-
# http://www.apache.org/licenses/LICENSE-2.0
|
9 |
-
#
|
10 |
-
# Unless required by applicable law or agreed to in writing, software
|
11 |
-
# distributed under the License is distributed on an "AS IS" BASIS,
|
12 |
-
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
13 |
-
# See the License for the specific language governing permissions and
|
14 |
-
# limitations under the License.
|
15 |
-
"""Did You Know? dataset"""
|
16 |
-
|
17 |
-
|
18 |
-
import csv
|
19 |
-
import os
|
20 |
-
|
21 |
-
import datasets
|
22 |
-
|
23 |
-
|
24 |
-
_CITATION = """\
|
25 |
-
@inproceedings{marcinczuk2013open,
|
26 |
-
title={Open dataset for development of Polish Question Answering systems},
|
27 |
-
author={Marcinczuk, Michal and Ptak, Marcin and Radziszewski, Adam and Piasecki, Maciej},
|
28 |
-
booktitle={Proceedings of the 6th Language & Technology Conference: Human Language Technologies as a Challenge for Computer Science and Linguistics, Wydawnictwo Poznanskie, Fundacja Uniwersytetu im. Adama Mickiewicza},
|
29 |
-
year={2013}
|
30 |
-
}
|
31 |
-
"""
|
32 |
-
|
33 |
-
_DESCRIPTION = """\
|
34 |
-
The Did You Know (pol. Czy wiesz?) dataset consists of human-annotated question-answer pairs. The task is to predict if the answer is correct. We chose the negatives which have the largest token overlap with a question.
|
35 |
-
"""
|
36 |
-
|
37 |
-
_HOMEPAGE = "http://nlp.pwr.wroc.pl/en/tools-and-resources/resources/czy-wiesz-question-answering-dataset"
|
38 |
-
|
39 |
-
_LICENSE = "CC BY-SA 3.0"
|
40 |
-
|
41 |
-
_URLs = "https://klejbenchmark.com/static/data/klej_dyk.zip"
|
42 |
-
|
43 |
-
|
44 |
-
class DYK(datasets.GeneratorBasedBuilder):
|
45 |
-
"""Did You Know? Dataset"""
|
46 |
-
|
47 |
-
VERSION = datasets.Version("1.1.0")
|
48 |
-
|
49 |
-
def _info(self):
|
50 |
-
return datasets.DatasetInfo(
|
51 |
-
description=_DESCRIPTION,
|
52 |
-
features=datasets.Features(
|
53 |
-
{
|
54 |
-
"q_id": datasets.Value("string"),
|
55 |
-
"question": datasets.Value("string"),
|
56 |
-
"answer": datasets.Value("string"),
|
57 |
-
"target": datasets.ClassLabel(names=["0", "1"]),
|
58 |
-
}
|
59 |
-
),
|
60 |
-
supervised_keys=None,
|
61 |
-
homepage=_HOMEPAGE,
|
62 |
-
license=_LICENSE,
|
63 |
-
citation=_CITATION,
|
64 |
-
)
|
65 |
-
|
66 |
-
def _split_generators(self, dl_manager):
|
67 |
-
"""Returns SplitGenerators."""
|
68 |
-
data_dir = dl_manager.download_and_extract(_URLs)
|
69 |
-
return [
|
70 |
-
datasets.SplitGenerator(
|
71 |
-
name=datasets.Split.TRAIN,
|
72 |
-
gen_kwargs={
|
73 |
-
"filepath": os.path.join(data_dir, "train.tsv"),
|
74 |
-
"split": "train",
|
75 |
-
},
|
76 |
-
),
|
77 |
-
datasets.SplitGenerator(
|
78 |
-
name=datasets.Split.TEST,
|
79 |
-
gen_kwargs={"filepath": os.path.join(data_dir, "test_features.tsv"), "split": "test"},
|
80 |
-
),
|
81 |
-
]
|
82 |
-
|
83 |
-
def _generate_examples(self, filepath, split):
|
84 |
-
"""Yields examples."""
|
85 |
-
with open(filepath, encoding="utf-8") as f:
|
86 |
-
reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
|
87 |
-
for id_, row in enumerate(reader):
|
88 |
-
yield id_, {
|
89 |
-
"q_id": row["q_id"],
|
90 |
-
"question": row["question"],
|
91 |
-
"answer": row["answer"],
|
92 |
-
"target": -1 if split == "test" else row["target"],
|
93 |
-
}
|
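
The deleted script declared the feature schema (three string fields plus a two-class ClassLabel target) and filled the test-split target with -1. After the conversion that schema travels with the Parquet shards and the README metadata, so it can still be inspected after loading. A minimal sketch; the repository id is again a placeholder:

from datasets import load_dataset, ClassLabel

ds = load_dataset("ORG/dyk")  # placeholder repo id -- substitute the actual one
# The schema formerly built in dyk.py's _info() now comes from the dataset metadata.
print(ds["train"].features)
assert isinstance(ds["train"].features["target"], ClassLabel)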