herbievore committed
Commit cef968a • 1 Parent(s): b7d5f57

CsFEVER initial descriptors

Files changed (7)
  1. README.md +11 -0
  2. csfever.py +83 -0
  3. data/.gitignore +0 -0
  4. data_dr/.gitignore +0 -0
  5. experiments.ipynb +211 -0
  6. generate_readme.py +0 -0
  7. test.py +4 -0
README.md CHANGED
@@ -1,3 +1,14 @@
  ---
  license: cc-by-sa-3.0
  ---
+ # CsFEVER experimental Fact-Checking dataset
+
+ A Czech dataset for fact verification, localized from the data points of [FEVER](https://arxiv.org/abs/1803.05355) using the localization scheme described in the [CTKFacts: Czech Datasets for Fact Verification](https://arxiv.org/abs/2201.11115) paper, which is currently being revised for publication in the LREV journal.
+
+ The version you are looking at was reformatted into *Claim*-*Evidence* string pairs for the specific task of NLI. A more general, Document-Retrieval-ready interpretation of our data points, which can be used for training and evaluating DR models over the June 2016 Wikipedia snapshot, can be found in the [data_dr]() folder in the JSON Lines format.
+
+ ## Data Statement
+
+ ### Curation Rationale
+
+ TODO
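For orientation, here is a minimal sketch of loading the NLI-formatted pairs with the 🤗 `datasets` library; the field names (`id`, `label`, `evidence`, `claim`) follow the `csfever.py` loader below, and running from the repository root is an assumption:

```python
from datasets import load_dataset

# Load all three splits through the local loader script (csfever.py below).
dataset = load_dataset("csfever.py")

example = dataset["train"][0]
print(example["claim"])     # the Czech claim to be verified
print(example["evidence"])  # evidence passages joined into a single string
print(example["label"])     # integer index into ["REFUTES", "NOT ENOUGH INFO", "SUPPORTS"]
```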
csfever.py ADDED
@@ -0,0 +1,83 @@
+ import json
+
+ import datasets
+
+ _VERSION = "0.0.1"
+
+ _URL = "data/"
+
+ _URLS = {
+     "train": _URL + "train.jsonl",
+     "validation": _URL + "validation.jsonl",
+     "test": _URL + "test.jsonl",
+ }
+
+ _DESCRIPTION = """\
+ CsFEVER is a Czech localisation of the English FEVER dataset.
+ """
+
+ # Raw string so the BibTeX accent escapes (e.g. {\'{y}}) are preserved verbatim.
+ _CITATION = r"""
+ @article{DBLP:journals/corr/abs-2201-11115,
+   author     = {Jan Drchal and
+                 Herbert Ullrich and
+                 Martin R{\'{y}}par and
+                 Hana Vincourov{\'{a}} and
+                 V{\'{a}}clav Moravec},
+   title      = {CsFEVER and CTKFacts: Czech Datasets for Fact Verification},
+   journal    = {CoRR},
+   volume     = {abs/2201.11115},
+   year       = {2022},
+   url        = {https://arxiv.org/abs/2201.11115},
+   eprinttype = {arXiv},
+   eprint     = {2201.11115},
+   timestamp  = {Tue, 01 Feb 2022 14:59:01 +0100},
+   biburl     = {https://dblp.org/rec/journals/corr/abs-2201-11115.bib},
+   bibsource  = {dblp computer science bibliography, https://dblp.org}
+ }
+ """
+
+
+ class CsFever(datasets.GeneratorBasedBuilder):
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "label": datasets.ClassLabel(names=["REFUTES", "NOT ENOUGH INFO", "SUPPORTS"]),
+                     "evidence": datasets.Value("string"),
+                     "claim": datasets.Value("string"),
+                 }
+             ),
+             # No default supervised_keys (we have to pass both the claim
+             # and the evidence as input).
+             supervised_keys=None,
+             version=_VERSION,
+             homepage="https://fcheck.fel.cvut.cz/dataset/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager: datasets.DownloadManager):
+         downloaded_files = dl_manager.download_and_extract(_URLS)
+
+         return [
+             datasets.SplitGenerator(datasets.Split.TRAIN, {"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(datasets.Split.VALIDATION, {"filepath": downloaded_files["validation"]}),
+             datasets.SplitGenerator(datasets.Split.TEST, {"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """This function returns the examples in the raw (text) form."""
+         key = 0
+         with open(filepath, encoding="utf-8") as f:
+             for line in f:
+                 datapoint = json.loads(line)
+                 yield key, {
+                     "id": datapoint["id"],
+                     # The evidence comes as a list of passages; join into one string.
+                     "evidence": " ".join(datapoint["evidence"]),
+                     "claim": datapoint["claim"],
+                     "label": datapoint["label"],
+                 }
+                 key += 1
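Because `label` is declared as a `ClassLabel`, the splits store integer labels; a short sketch (assuming the loader script above sits in the working directory) of converting between indices and the label names declared in `_info`:

```python
from datasets import load_dataset

dataset = load_dataset("csfever.py")
label_feature = dataset["train"].features["label"]

# ClassLabel keeps the declared names; int2str/str2int convert both ways.
print(label_feature.int2str(0))           # REFUTES
print(label_feature.str2int("SUPPORTS"))  # 2
```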
data/.gitignore ADDED
File without changes
data_dr/.gitignore ADDED
File without changes
experiments.ipynb ADDED
@@ -0,0 +1,211 @@
+ {
+  "cells": [
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "# 🤗 Experiments over hf dataset"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 1,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import datasets\n",
+     "from datasets import load_dataset"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 2,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "Using custom data configuration default\n",
+       "Reusing dataset ctkfacts_nli (/Users/bertik/.cache/huggingface/datasets/ctkfacts_nli/default/0.0.0/5dcd805dfbd9694ead18f5cf4da8d902a1a1ca53685a5ebabd33f3d314dd597d)\n",
+       "100%|██████████| 3/3 [00:00<00:00, 409.28it/s]\n"
+      ]
+     }
+    ],
+    "source": [
+     "d=load_dataset(\"csfever.py\")"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 10,
+    "metadata": {},
+    "outputs": [
+     {
+      "data": {
+       "text/plain": [
+        "DatasetDict({\n",
+        "    train: Dataset({\n",
+        "        features: ['id', 'label', 'evidence', 'claim'],\n",
+        "        num_rows: 2903\n",
+        "    })\n",
+        "    validation: Dataset({\n",
+        "        features: ['id', 'label', 'evidence', 'claim'],\n",
+        "        num_rows: 377\n",
+        "    })\n",
+        "    test: Dataset({\n",
+        "        features: ['id', 'label', 'evidence', 'claim'],\n",
+        "        num_rows: 431\n",
+        "    })\n",
+        "})"
+       ]
+      },
+      "execution_count": 10,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "d"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 11,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "\n",
+       " _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|\n",
+       " _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n",
+       " _|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|\n",
+       " _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n",
+       " _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|\n",
+       "\n",
+       " \n",
+       "Username: ^C\n",
+       "Traceback (most recent call last):\n",
+       "  File \"/opt/homebrew/bin/huggingface-cli\", line 8, in <module>\n",
+       "    sys.exit(main())\n",
+       "  File \"/opt/homebrew/lib/python3.9/site-packages/huggingface_hub/commands/huggingface_cli.py\", line 41, in main\n",
+       "    service.run()\n",
+       "  File \"/opt/homebrew/lib/python3.9/site-packages/huggingface_hub/commands/user.py\", line 169, in run\n",
+       "    username = input(\"Username: \")\n",
+       "KeyboardInterrupt\n"
+      ]
+     }
+    ],
+    "source": [
+     "!huggingface-cli login"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": 4,
+    "metadata": {},
+    "outputs": [
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "Downloading: 100%|██████████| 2.55k/2.55k [00:00<00:00, 1.33MB/s]\n",
+       "Using custom data configuration default\n"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Downloading and preparing dataset ctkfacts_nli/default to /Users/bertik/.cache/huggingface/datasets/heruberuto___ctkfacts_nli/default/0.0.0/5dcd805dfbd9694ead18f5cf4da8d902a1a1ca53685a5ebabd33f3d314dd597d...\n"
+      ]
+     },
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "Downloading: 100%|██████████| 2.14M/2.14M [00:00<00:00, 2.86MB/s]\n",
+       "Downloading: 100%|██████████| 247k/247k [00:00<00:00, 386kB/s]\n",
+       "Downloading: 100%|██████████| 287k/287k [00:00<00:00, 450kB/s]\n",
+       "100%|██████████| 3/3 [00:04<00:00, 1.66s/it]\n",
+       "100%|██████████| 3/3 [00:00<00:00, 976.56it/s]\n"
+      ]
+     },
+     {
+      "name": "stdout",
+      "output_type": "stream",
+      "text": [
+       "Dataset ctkfacts_nli downloaded and prepared to /Users/bertik/.cache/huggingface/datasets/heruberuto___ctkfacts_nli/default/0.0.0/5dcd805dfbd9694ead18f5cf4da8d902a1a1ca53685a5ebabd33f3d314dd597d. Subsequent calls will reuse this data.\n"
+      ]
+     },
+     {
+      "name": "stderr",
+      "output_type": "stream",
+      "text": [
+       "100%|██████████| 3/3 [00:00<00:00, 811.96it/s]\n"
+      ]
+     },
+     {
+      "data": {
+       "text/plain": [
+        "DatasetDict({\n",
+        "    train: Dataset({\n",
+        "        features: ['id', 'label', 'evidence', 'claim'],\n",
+        "        num_rows: 2903\n",
+        "    })\n",
+        "    validation: Dataset({\n",
+        "        features: ['id', 'label', 'evidence', 'claim'],\n",
+        "        num_rows: 377\n",
+        "    })\n",
+        "    test: Dataset({\n",
+        "        features: ['id', 'label', 'evidence', 'claim'],\n",
+        "        num_rows: 431\n",
+        "    })\n",
+        "})"
+       ]
+      },
+      "execution_count": 4,
+      "metadata": {},
+      "output_type": "execute_result"
+     }
+    ],
+    "source": [
+     "load_dataset(\"heruberuto/ctkfacts_nli\", use_auth_token=True)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": []
+   }
+  ],
+  "metadata": {
+   "interpreter": {
+    "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
+   },
+   "kernelspec": {
+    "display_name": "Python 3.9.7 64-bit",
+    "language": "python",
+    "name": "python3"
+   },
+   "language_info": {
+    "codemirror_mode": {
+     "name": "ipython",
+     "version": 3
+    },
+    "file_extension": ".py",
+    "mimetype": "text/x-python",
+    "name": "python",
+    "nbconvert_exporter": "python",
+    "pygments_lexer": "ipython3",
+    "version": "3.9.7"
+   },
+   "orig_nbformat": 4
+  },
+  "nbformat": 4,
+  "nbformat_minor": 2
+ }
generate_readme.py ADDED
File without changes
test.py ADDED
@@ -0,0 +1,4 @@
+ import datasets
+
+ # Smoke test: load all three splits through the local loader script.
+ datasets.load_dataset("csfever.py")
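Since the data points are *Claim*-*Evidence* string pairs, a typical NLI pipeline encodes the two fields as a sentence pair. A hedged sketch using the 🤗 `transformers` tokenizer API, which is not part of this repository; the multilingual checkpoint name is illustrative only:

```python
from datasets import load_dataset
from transformers import AutoTokenizer

dataset = load_dataset("csfever.py")
# Any Czech-capable encoder would do; this checkpoint is a placeholder choice.
tokenizer = AutoTokenizer.from_pretrained("bert-base-multilingual-cased")

def encode(batch):
    # NLI convention: evidence as the premise, claim as the hypothesis.
    return tokenizer(batch["evidence"], batch["claim"], truncation=True)

encoded = dataset.map(encode, batched=True)
```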