parquet-converter committed
Commit e7be244 · Parent: c9f2ce7

Update parquet files
.gitattributes DELETED
@@ -1,41 +0,0 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bin.* filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zstandard filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- data/test.jsonl filter=lfs diff=lfs merge=lfs -text
- data/train.jsonl filter=lfs diff=lfs merge=lfs -text
- data/validation.jsonl filter=lfs diff=lfs merge=lfs -text
- data_dr/train_deepl.jsonl filter=lfs diff=lfs merge=lfs -text
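These deleted rules routed large binaries and the JSONL splits through Git LFS; after the Parquet conversion they are no longer needed. As a rough illustration only — gitattributes glob semantics are not identical to Python's fnmatch, and the candidate paths below are hypothetical — a sketch of which files such patterns would capture:

```python
import fnmatch

# A few of the LFS patterns removed above; candidate paths are hypothetical.
patterns = ["*.parquet", "*.h5", "data/train.jsonl", "*tfevents*"]
candidates = ["default/csfever-train.parquet", "data/train.jsonl", "csfever.py"]

for path in candidates:
    # fnmatch only approximates gitattributes matching, but these flat
    # patterns behave the same way under both.
    tracked = any(fnmatch.fnmatch(path, pattern) for pattern in patterns)
    print(f"{path}: {'Git LFS' if tracked else 'plain git'}")
```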
README.md DELETED
@@ -1,14 +0,0 @@
- ---
- license: cc-by-sa-3.0
- ---
- # CsFEVER experimental Fact-Checking dataset
-
- Czech dataset for fact verification, localized from the data points of [FEVER](https://arxiv.org/abs/1803.05355) using the localization scheme described in the [CTKFacts: Czech Datasets for Fact Verification](https://arxiv.org/abs/2201.11115) paper, which is currently under revision for publication in the LREV journal.
-
- The version you are looking at was reformatted into *Claim*-*Evidence* string pairs for the specific task of NLI. A more general, Document-Retrieval-ready interpretation of our data points, which can be used for training and evaluating DR models over the June 2016 Wikipedia snapshot, can be found in the [data_dr]() folder in JSON Lines format.
-
- ## Data Statement
-
- ### Curation Rationale
-
- TODO
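The Claim-Evidence pairs described in the deleted README load as an ordinary NLI dataset. A minimal sketch, assuming the loader script below (csfever.py, also deleted in this commit) is available locally:

```python
from datasets import load_dataset

# Load the train/validation/test splits via the repo's loader script.
ds = load_dataset("csfever.py")

example = ds["train"][0]
print(example["claim"])     # the Czech claim string
print(example["evidence"])  # evidence passages joined into one string
print(example["label"])     # 0=REFUTES, 1=NOT ENOUGH INFO, 2=SUPPORTS
```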
 
csfever.py DELETED
@@ -1,92 +0,0 @@
- import json
-
- import datasets
-
- _VERSION = "0.0.1"
-
- _URL = "data/"
-
- _URLS = {
-     "train": _URL + "train.jsonl",
-     "validation": _URL + "validation.jsonl",
-     "test": _URL + "test.jsonl",
- }
-
- _DESCRIPTION = """\
- CsFEVER is a Czech localisation of the English FEVER dataset.
- """
-
- _CITATION = """\
- @article{DBLP:journals/corr/abs-2201-11115,
-   author     = {Jan Drchal and
-                 Herbert Ullrich and
-                 Martin R{\'{y}}par and
-                 Hana Vincourov{\'{a}} and
-                 V{\'{a}}clav Moravec},
-   title      = {CsFEVER and CTKFacts: Czech Datasets for Fact Verification},
-   journal    = {CoRR},
-   volume     = {abs/2201.11115},
-   year       = {2022},
-   url        = {https://arxiv.org/abs/2201.11115},
-   eprinttype = {arXiv},
-   eprint     = {2201.11115},
-   timestamp  = {Tue, 01 Feb 2022 14:59:01 +0100},
-   biburl     = {https://dblp.org/rec/journals/corr/abs-2201-11115.bib},
-   bibsource  = {dblp computer science bibliography, https://dblp.org}
- }
- """
-
-
- class CsFever(datasets.GeneratorBasedBuilder):
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             features=datasets.Features(
-                 {
-                     "id": datasets.Value("int32"),
-                     "label": datasets.ClassLabel(names=["REFUTES", "NOT ENOUGH INFO", "SUPPORTS"]),
-                     "evidence": datasets.Value("string"),
-                     "claim": datasets.Value("string"),
-                 }
-             ),
-             # No default supervised_keys (as we have to pass both claim
-             # and evidence as input).
-             supervised_keys=None,
-             version=_VERSION,
-             homepage="https://fcheck.fel.cvut.cz/dataset/",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager: datasets.DownloadManager):
-         downloaded_files = dl_manager.download_and_extract(_URLS)
-
-         return [
-             datasets.SplitGenerator(datasets.Split.TRAIN, {"filepath": downloaded_files["train"]}),
-             datasets.SplitGenerator(datasets.Split.VALIDATION, {"filepath": downloaded_files["validation"]}),
-             datasets.SplitGenerator(datasets.Split.TEST, {"filepath": downloaded_files["test"]}),
-         ]
-
-     def _generate_examples(self, filepath):
-         """This function returns the examples in the raw (text) form."""
-         key = 0
-         with open(filepath, encoding="utf-8") as f:
-             for line in f:
-                 datapoint = json.loads(line)
-                 yield key, {
-                     "id": datapoint["id"],
-                     "evidence": " ".join(datapoint["evidence"]),
-                     "claim": datapoint["claim"],
-                     "label": datapoint["label"],
-                 }
-                 key += 1
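For reference, a hypothetical input record in the shape `_generate_examples` expects — the field names come from the loader above, while the Czech strings are invented for illustration:

```python
import json

# Hypothetical JSONL line: "evidence" is a list of strings that the
# loader joins with single spaces before yielding the example.
line = json.dumps({
    "id": 0,
    "label": "SUPPORTS",
    "claim": "Praha je hlavní město České republiky.",
    "evidence": ["Praha je hlavní město a největší obec Česka."],
})

datapoint = json.loads(line)
print(" ".join(datapoint["evidence"]))
```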
data/.gitignore DELETED
File without changes
data/train.jsonl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:8fde592187ebd98cbe1ce58aa78fe84a0c6f0fa6d367e1ebc49d7bbce272524c
- size 168449812
 
data_dr/.gitignore DELETED
File without changes
data_dr/dev_deepl.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data_dr/test_deepl.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
data/test.jsonl → default/csfever-test.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c078bfcc1f6b28e86dba58f79d318b1bd552859f48247e29af2d112579b98eaa
- size 15499684
+ oid sha256:9445a670642ffbe574f0665c6b4a95f23ea6d376fe257db9468b94b8eeb71158
+ size 7884012
data/validation.jsonl → default/csfever-train.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:0e261591aa4c0da6d3f1b9f82070d079d47314fd280a9048d772faad3132ea8a
- size 15776086
+ oid sha256:029744fae866fc803f8158e75cab0fe5003e9843cee9f8645bc8eb6740590eb5
+ size 85614680
data_dr/train_deepl.jsonl → default/csfever-validation.parquet RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:107dbafcba054c1bf23e097778d0d77a1be006bce42e48265ea5efe775a8d02f
- size 32401546
+ oid sha256:bb8d52b017d05f1a081302af023fe250f97cce5fe3c660b1e403fbb5c634bc11
+ size 8048591
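After this commit the splits live as plain Parquet files under default/. A minimal sketch of loading them directly with the datasets library, using the paths from the renames above (the repository id is not shown on this page, so local paths are assumed):

```python
from datasets import load_dataset

# Paths taken from the renames above; adjust if loading from the Hub.
data_files = {
    "train": "default/csfever-train.parquet",
    "validation": "default/csfever-validation.parquet",
    "test": "default/csfever-test.parquet",
}

ds = load_dataset("parquet", data_files=data_files)
print(ds)
```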
experiments.ipynb DELETED
@@ -1,211 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# 🤗 Experiments over hf dataset"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {},
- "outputs": [],
- "source": [
- "import datasets\n",
- "from datasets import load_dataset"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Using custom data configuration default\n",
- "Reusing dataset ctkfacts_nli (/Users/bertik/.cache/huggingface/datasets/ctkfacts_nli/default/0.0.0/5dcd805dfbd9694ead18f5cf4da8d902a1a1ca53685a5ebabd33f3d314dd597d)\n",
- "100%|██████████| 3/3 [00:00<00:00, 409.28it/s]\n"
- ]
- }
- ],
- "source": [
- "d=load_dataset(\"csfever.py\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "text/plain": [
- "DatasetDict({\n",
- " train: Dataset({\n",
- " features: ['id', 'label', 'evidence', 'claim'],\n",
- " num_rows: 2903\n",
- " })\n",
- " validation: Dataset({\n",
- " features: ['id', 'label', 'evidence', 'claim'],\n",
- " num_rows: 377\n",
- " })\n",
- " test: Dataset({\n",
- " features: ['id', 'label', 'evidence', 'claim'],\n",
- " num_rows: 431\n",
- " })\n",
- "})"
- ]
- },
- "execution_count": 10,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "d"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 11,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\n",
- " _| _| _| _| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _|_|_|_| _|_| _|_|_| _|_|_|_|\n",
- " _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n",
- "_|_|_|_| _| _| _| _|_| _| _|_| _| _| _| _| _| _|_| _|_|_| _|_|_|_| _| _|_|_|\n",
- " _| _| _| _| _| _| _| _| _| _| _|_| _| _| _| _| _| _| _|\n",
- " _| _| _|_| _|_|_| _|_|_| _|_|_| _| _| _|_|_| _| _| _| _|_|_| _|_|_|_|\n",
- "\n",
- " \n",
- "Username: ^C\n",
- "Traceback (most recent call last):\n",
- " File \"/opt/homebrew/bin/huggingface-cli\", line 8, in <module>\n",
- " sys.exit(main())\n",
- " File \"/opt/homebrew/lib/python3.9/site-packages/huggingface_hub/commands/huggingface_cli.py\", line 41, in main\n",
- " service.run()\n",
- " File \"/opt/homebrew/lib/python3.9/site-packages/huggingface_hub/commands/user.py\", line 169, in run\n",
- " username = input(\"Username: \")\n",
- "KeyboardInterrupt\n"
- ]
- }
- ],
- "source": [
- "!huggingface-cli login"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Downloading: 100%|██████████| 2.55k/2.55k [00:00<00:00, 1.33MB/s]\n",
- "Using custom data configuration default\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Downloading and preparing dataset ctkfacts_nli/default to /Users/bertik/.cache/huggingface/datasets/heruberuto___ctkfacts_nli/default/0.0.0/5dcd805dfbd9694ead18f5cf4da8d902a1a1ca53685a5ebabd33f3d314dd597d...\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Downloading: 100%|██████████| 2.14M/2.14M [00:00<00:00, 2.86MB/s]\n",
- "Downloading: 100%|██████████| 247k/247k [00:00<00:00, 386kB/s]\n",
- "Downloading: 100%|██████████| 287k/287k [00:00<00:00, 450kB/s]\n",
- "100%|██████████| 3/3 [00:04<00:00, 1.66s/it]\n",
- "100%|██████████| 3/3 [00:00<00:00, 976.56it/s]\n"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Dataset ctkfacts_nli downloaded and prepared to /Users/bertik/.cache/huggingface/datasets/heruberuto___ctkfacts_nli/default/0.0.0/5dcd805dfbd9694ead18f5cf4da8d902a1a1ca53685a5ebabd33f3d314dd597d. Subsequent calls will reuse this data.\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 3/3 [00:00<00:00, 811.96it/s]\n"
- ]
- },
- {
- "data": {
- "text/plain": [
- "DatasetDict({\n",
- " train: Dataset({\n",
- " features: ['id', 'label', 'evidence', 'claim'],\n",
- " num_rows: 2903\n",
- " })\n",
- " validation: Dataset({\n",
- " features: ['id', 'label', 'evidence', 'claim'],\n",
- " num_rows: 377\n",
- " })\n",
- " test: Dataset({\n",
- " features: ['id', 'label', 'evidence', 'claim'],\n",
- " num_rows: 431\n",
- " })\n",
- "})"
- ]
- },
- "execution_count": 4,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "load_dataset(\"heruberuto/ctkfacts_nli\", use_auth_token=True)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {},
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "interpreter": {
- "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
- },
- "kernelspec": {
- "display_name": "Python 3.9.7 64-bit",
- "language": "python",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.9.7"
- },
- "orig_nbformat": 4
- },
- "nbformat": 4,
- "nbformat_minor": 2
- }
generate_readme.py DELETED
File without changes
test.py DELETED
@@ -1,3 +0,0 @@
- import datasets
-
- datasets.load_dataset("csfever.py")