heruberuto committed on
Commit
4521650
1 Parent(s): ba75263

First alpha of data and stuff

ctkfacts_nli.py ADDED
@@ -0,0 +1,64 @@
+ import json
+ import os
+ import pathlib
+
+ import datasets
+
+ _DESCRIPTION = """\
+ CtkFactsNLI is an NLI version of the Czech CTKFacts dataset
+ """
+
+ _CITATION = """\
+ todo
+ """
+
+
+ class CtkfactsNli(datasets.GeneratorBasedBuilder):
+     """NLI-style builder: each example pairs a claim with its evidence text."""
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("int32"),
+                     "label": datasets.ClassLabel(names=["SUPPORTS", "REFUTES", "NOT ENOUGH INFO"]),
+                     "evidence": datasets.Value("string"),
+                     "claim": datasets.Value("string"),
+                 }
+             ),
+             # No default supervised_keys: both the claim and the evidence are model inputs.
+             supervised_keys=None,
+             homepage="https://fcheck.fel.cvut.cz/dataset/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # The JSONL splits ship alongside this script in data/, so nothing needs downloading.
+         data_dir = pathlib.Path(__file__).parent.resolve()
+         return [
+             datasets.SplitGenerator(datasets.Split.TRAIN, {
+                 "filepath": os.path.join(data_dir, "data", "train.jsonl")
+             }),
+             datasets.SplitGenerator(datasets.Split.VALIDATION, {
+                 "filepath": os.path.join(data_dir, "data", "validation.jsonl")
+             }),
+             datasets.SplitGenerator(datasets.Split.TEST, {
+                 "filepath": os.path.join(data_dir, "data", "test.jsonl")
+             }),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields the examples in raw (text) form, one per JSONL line."""
+         key = 0
+         with open(filepath, encoding="utf-8") as f:
+             for line in f:
+                 datapoint = json.loads(line)
+                 yield key, {
+                     "id": datapoint["id"],
+                     # evidence is stored as a list of sentences; join it to match the string feature
+                     "evidence": " ".join(datapoint["evidence"]),
+                     "claim": datapoint["claim"],
+                     "label": datapoint["label"],
+                 }
+                 key += 1
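
Each line of the data/*.jsonl splits added below has to match the record shape _generate_examples parses: an integer id, a label from the three-class set, an evidence list of sentence strings (joined with spaces on load), and a claim string. A minimal sketch of such a record; the field values are illustrative, not taken from the actual data files:

import json

# hypothetical record mirroring the keys _generate_examples reads
line = '{"id": 1, "label": "SUPPORTS", "evidence": ["Sentence one.", "Sentence two."], "claim": "An example claim."}'
datapoint = json.loads(line)
assert datapoint["label"] in ("SUPPORTS", "REFUTES", "NOT ENOUGH INFO")
print(" ".join(datapoint["evidence"]))  # evidence sentences flattened into one string
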
data/test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/train.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
data/validation.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
experiments.ipynb ADDED
@@ -0,0 +1,86 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# 🤗 Experiments over hf dataset"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import datasets\n",
+ "from datasets import load_dataset"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Using custom data configuration default\n"
+ ]
+ },
+ {
+ "ename": "NotImplementedError",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mNotImplementedError\u001b[0m Traceback (most recent call last)",
+ "\u001b[0;32m/var/folders/6z/tk1x_gtj40lgc1zd7y9pql3h0000gn/T/ipykernel_83405/3889717483.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mload_dataset\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"ctkfacts_nli.py\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
+ "\u001b[0;32m/opt/homebrew/lib/python3.9/site-packages/datasets/load.py\u001b[0m in \u001b[0;36mload_dataset\u001b[0;34m(path, name, data_dir, data_files, split, cache_dir, features, download_config, download_mode, ignore_verifications, keep_in_memory, save_infos, revision, use_auth_token, task, streaming, script_version, **config_kwargs)\u001b[0m\n\u001b[1;32m 1602\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1603\u001b[0m \u001b[0;31m# Create a dataset builder\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1604\u001b[0;31m builder_instance = load_dataset_builder(\n\u001b[0m\u001b[1;32m 1605\u001b[0m \u001b[0mpath\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mpath\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1606\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/opt/homebrew/lib/python3.9/site-packages/datasets/load.py\u001b[0m in \u001b[0;36mload_dataset_builder\u001b[0;34m(path, name, data_dir, data_files, cache_dir, features, download_config, download_mode, revision, use_auth_token, script_version, **config_kwargs)\u001b[0m\n\u001b[1;32m 1460\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1461\u001b[0m \u001b[0;31m# Instantiate the dataset builder\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1462\u001b[0;31m builder_instance: DatasetBuilder = builder_cls(\n\u001b[0m\u001b[1;32m 1463\u001b[0m \u001b[0mcache_dir\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcache_dir\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1464\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/opt/homebrew/lib/python3.9/site-packages/datasets/builder.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, writer_batch_size, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1041\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1042\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwriter_batch_size\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mNone\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1043\u001b[0;31m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGeneratorBasedBuilder\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1044\u001b[0m \u001b[0;31m# Batch size used by the ArrowWriter\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1045\u001b[0m \u001b[0;31m# It defines the number of samples that are kept in memory before writing them\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/opt/homebrew/lib/python3.9/site-packages/datasets/builder.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, cache_dir, name, hash, base_path, features, use_auth_token, namespace, data_files, data_dir, **config_kwargs)\u001b[0m\n\u001b[1;32m 265\u001b[0m \u001b[0;31m# Prefill datasetinfo\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 266\u001b[0m \u001b[0minfo\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_exported_dataset_info\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 267\u001b[0;31m \u001b[0minfo\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mupdate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_info\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 268\u001b[0m \u001b[0minfo\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbuilder_name\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 269\u001b[0m \u001b[0minfo\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconfig_name\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mconfig\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;32m/opt/homebrew/lib/python3.9/site-packages/datasets/builder.py\u001b[0m in \u001b[0;36m_info\u001b[0;34m(self)\u001b[0m\n\u001b[1;32m 477\u001b[0m \u001b[0minfo\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mDatasetInfo\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0mThe\u001b[0m \u001b[0mdataset\u001b[0m \u001b[0minformation\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 478\u001b[0m \"\"\"\n\u001b[0;32m--> 479\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mNotImplementedError\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 480\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 481\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mclassmethod\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
+ "\u001b[0;31mNotImplementedError\u001b[0m: "
+ ]
+ }
+ ],
+ "source": [
+ "load_dataset(\"ctkfacts_nli.py\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "interpreter": {
+ "hash": "b0fa6594d8f4cbf19f97940f81e996739fb7646882a419484c72d19e05852a7e"
+ },
+ "kernelspec": {
+ "display_name": "Python 3.9.7 64-bit",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.9.7"
+ },
+ "orig_nbformat": 4
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
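
The NotImplementedError in the traceback above is raised by the base class's _info, which suggests load_dataset did not instantiate the CtkfactsNli builder defined in ctkfacts_nli.py. A stale copy of the script in the local modules cache is one plausible cause; that is an assumption, not a confirmed diagnosis. A sketch of clearing it, assuming the library's default cache location:

import os
import shutil

# datasets copies local loader scripts into a modules cache keyed by script name;
# deleting a stale copy forces a fresh import on the next load_dataset call
# (the path below is the library default and an assumption about this machine's setup)
modules_cache = os.path.expanduser("~/.cache/huggingface/modules/datasets_modules/datasets/ctkfacts_nli")
shutil.rmtree(modules_cache, ignore_errors=True)
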
generate_readme.py ADDED
File without changes
test.py ADDED
@@ -0,0 +1,3 @@
+ import datasets
+
+ datasets.load_dataset("ctkfacts_nli.py")
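
If the loader resolves correctly, load_dataset returns a DatasetDict with the three splits declared in _split_generators. A sketch of the expected usage; the printed shapes follow from the feature declaration rather than from a verified run:

import datasets

dataset = datasets.load_dataset("ctkfacts_nli.py")
print(dataset["train"][0])                       # {'id': ..., 'label': ..., 'evidence': ..., 'claim': ...}
print(dataset["train"].features["label"].names)  # ['SUPPORTS', 'REFUTES', 'NOT ENOUGH INFO']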