# Loading script for the COPA-ca dataset.

import json

import datasets

logger = datasets.logging.get_logger(__name__)

# No published citation yet; kept as an explicit empty string because
# datasets.DatasetInfo(citation=...) below requires the name to exist.
_CITATION = ""

_DESCRIPTION = """\
The COPA-ca dataset (Choice of plausible alternatives in Catalan) is a professional translation of the English COPA dataset into Catalan, commissioned by BSC LangTech Unit.
The dataset consists of 1000 premises, each given a question and two choices with a label encoding which of the choices is more plausible given the annotator.

The dataset is split into 400 training samples, 100 validation samples, and 500 test samples.
It includes the following features: 'premise', 'choice1', 'choice2', 'label', 'question', 'changed' (boolean).

This work is licensed under a Attribution-ShareAlike 4.0 International License.
"""

_HOMEPAGE = "https://zenodo.org/record/8124398"

_URL = "https://huggingface.co/datasets/projecte-aina/copa-ca/resolve/main/"
_TRAINING_FILE = "copa-ca.train.jsonl"
_DEV_FILE = "copa-ca.val.jsonl"
_TEST_FILE = "copa-ca.test.jsonl"


class copaCaConfig(datasets.BuilderConfig):
    """BuilderConfig for the COPA-ca dataset.

    Note: a config must subclass ``datasets.BuilderConfig``; the builder
    itself (``CopaCa`` below) subclasses ``datasets.GeneratorBasedBuilder``.
    """

    def __init__(self, **kwargs):
        """Initialize the config, forwarding all keyword args
        (name, version, description, ...) to BuilderConfig."""
        super().__init__(**kwargs)


class CopaCa(datasets.GeneratorBasedBuilder):
    """COPA-ca Dataset: plausible-alternative choice in Catalan."""

    BUILDER_CONFIGS = [
        copaCaConfig(
            name="copa-ca",
            version=datasets.Version("1.0.1"),
            description="COPA-ca dataset",
        ),
    ]

    def _info(self):
        """Return the DatasetInfo describing the features of each example."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "premise": datasets.Value("string"),
                    "choice1": datasets.Value("string"),
                    "choice2": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    # Labels arrive as ints in the JSONL; they are cast to
                    # the string class names '0'/'1' in _generate_examples.
                    "label": datasets.features.ClassLabel(names=["0", "1"]),
                    "idx": datasets.Value("int64"),
                    "changed": datasets.Value("bool"),
                }
            ),
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train/validation/test files."""
        urls_to_download = {
            "train": f"{_URL}{_TRAINING_FILE}",
            "dev": f"{_URL}{_DEV_FILE}",
            "test": f"{_URL}{_TEST_FILE}",
        }
        downloaded_files = dl_manager.download_and_extract(urls_to_download)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from one JSON-lines split file.

        Args:
            filepath: local path to the downloaded .jsonl file; each line
                is one JSON object with the keys read below.
        """
        with open(filepath, encoding="utf-8") as f:
            for i, line in enumerate(f):
                data = json.loads(line)
                yield i, {
                    "premise": data["premise"],
                    "choice1": data["choice1"],
                    "choice2": data["choice2"],
                    "question": data["question"],
                    # ClassLabel names are strings, so cast the int label.
                    "label": str(data["label"]),
                    "idx": data["idx"],
                    "changed": data["changed"],
                }