"""HuggingFace `datasets` loading script for EnfeverNLI.

EnfeverNLI is an NLI-style (claim / evidence / label) version of the
FEVER dataset.  Each JSON-lines record is mapped to an example with an
``id``, a free-text ``evidence`` passage, a ``claim``, and a 3-way label
(``REFUTES`` / ``NOT ENOUGH INFO`` / ``SUPPORTS``).
"""

import json
import os
import pathlib
from typing import overload

import datasets
from datasets.info import DatasetInfo

_VERSION = "0.0.1"

# Relative data directory; the three splits are JSON-lines files inside it.
_URL = "data/"
_URLS = {
    "train": _URL + "train.jsonl",
    "validation": _URL + "paper_dev.jsonl",
    "test": _URL + "paper_test.jsonl",
}

_DESCRIPTION = """\
EnfeverNLI is a NLI version of the fever dataset
"""

_CITATION = """\
todo
"""


class EnfeverNli(datasets.GeneratorBasedBuilder):
    """Dataset builder that reads EnfeverNLI JSON-lines files."""

    def _info(self):
        """Return the dataset metadata (features, version, homepage, citation)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    # Class order is significant: it fixes the integer encoding
                    # of each label (0=REFUTES, 1=NOT ENOUGH INFO, 2=SUPPORTS).
                    "label": datasets.ClassLabel(
                        names=["REFUTES", "NOT ENOUGH INFO", "SUPPORTS"]
                    ),
                    "evidence": datasets.Value("string"),
                    "claim": datasets.Value("string"),
                }
            ),
            # No default supervised_keys (as we have to pass both question
            # and context as input).
            supervised_keys=None,
            version=_VERSION,
            homepage="https://fcheck.fel.cvut.cz/dataset/",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the three split files and map each to a SplitGenerator."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from a JSON-lines split file.

        Keys are the 0-based line numbers; field names are remapped from
        the on-disk schema (``cid``/``context``/``query``/``label``) to the
        feature schema declared in :meth:`_info`.
        """
        with open(filepath, encoding="utf-8") as f:
            for key, line in enumerate(f):
                datapoint = json.loads(line)
                yield key, {
                    "id": datapoint["cid"],
                    "evidence": datapoint["context"],
                    "claim": datapoint["query"],
                    "label": datapoint["label"],
                }