"""ted2020_tw_mt""" |
|
|
|
|
|
import csv |
|
import json |
|
import os |
|
import datasets |
|
|
|
|
|
|
|
|
|
_CITATION = """\
@InProceedings{huggingface:dataset,
    title = {Chinese Aya Collection},
    author = {Heng-Shiou Sheu},
    year = {2024}
}
"""

_DESCRIPTION = """\
A curated dataset derived from CohereForAI's comprehensive Aya collection, with a particular
focus on Traditional Chinese data. It aggregates content from CohereForAI/aya_collection,
CohereForAI/aya_dataset, and CohereForAI/aya_evaluation_suite, filtering out everything except
Chinese content, covering both Traditional and Simplified Chinese.
"""

_SUBSET_NAMES = [
    'aya_dataset',
    'templated_ntx_llm',
    'templated_uner_llm',
    'templated_xcsqa',
    'templated_xlel_wd',
    'templated_xwikis',
    'translated_adversarial_qa',
    'translated_cnn_dailymail',
    'translated_dolly',
    'translated_flan_coqa',
    'translated_flan_cot',
    'translated_flan_gem_wiki',
    'translated_flan_lambada',
    'translated_flan_qa',
    'translated_hotpotqa',
    'translated_joke_explaination',
    'translated_mintaka',
    'translated_mlqa',
    'translated_nqopen',
    'translated_paws',
    'translated_piqa',
    'translated_wikiqa'
]

_HOMEPAGE = "https://huggingface.co/Heng666"

_LICENSE = "apache-2.0"

_URLS = {
    "aya_collection": "https://huggingface.co/datasets/CohereForAI/aya_collection",
    "aya_dataset": "https://huggingface.co/datasets/CohereForAI/aya_dataset",
    "evaluation_suite": "https://huggingface.co/datasets/CohereForAI/aya_evaluation_suite"
}
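
# NOTE: _URLS lists the upstream CohereForAI repositories this collection is
# derived from. It is not referenced by the download logic below, which reads
# pre-filtered CSV files shipped alongside this script.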
|
|
|
class ChineseAyaCollectionConfig(datasets.BuilderConfig):
    """BuilderConfig for the Chinese Aya collection."""

    def __init__(self, subset, **kwargs):
        """
        Args:
            subset: name of the subset to load.
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.subset = subset


class ChineseAyaCollectionDataset(datasets.GeneratorBasedBuilder):
    """Chinese subsets aggregated from the CohereForAI Aya collection, dataset and evaluation suite."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIG_CLASS = ChineseAyaCollectionConfig

    BUILDER_CONFIGS = [
        ChineseAyaCollectionConfig(
            name=subset,
            description=_DESCRIPTION,
            subset=subset
        )
        for subset in _SUBSET_NAMES
    ]
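    # Each subset above is exposed as its own configuration; for example,
    # `load_dataset(<repo_id>, name="translated_dolly")` selects one subset
    # (`<repo_id>` is a placeholder for wherever this script is hosted).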
|
|
|
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "id": datasets.Value("int64"),
                "inputs": datasets.Value("string"),
                "targets": datasets.Value("string"),
                "dataset_name": datasets.Value("string"),
                "sub_dataset_name": datasets.Value("string"),
                "task_type": datasets.Value("string"),
                "template_id": datasets.Value("string"),
                "language_code": datasets.Value("string"),
                "split": datasets.Value("string"),
                "script": datasets.Value("string"),
            }),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE
        )

    def _split_generators(self, dl_manager):
        subset = self.config.subset

        files = {}
        # Split files are CSVs shipped with this dataset; the paths are
        # relative to the dataset repository and resolved by the download manager.
        train_path = os.path.join("train", f"CohereForAI-{subset}-train.csv")
        files["train"] = train_path
        test_path = os.path.join("test", f"CohereForAI-{subset}-test.csv")
        files["test"] = test_path
        validation_path = os.path.join("validation", f"CohereForAI-{subset}-validation.csv")
        files["validation"] = validation_path

        try:
            data_dir = dl_manager.download_and_extract(files)
        except Exception:
            # If some split files are missing (e.g. subsets without test or
            # validation CSVs), fall back to the train split only.
            files.pop("test")
            files.pop("validation")
            data_dir = dl_manager.download_and_extract(files)

        output = []
        if "train" in files:
            train = datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": data_dir["train"]
                }
            )
            output.append(train)

        if "test" in files:
            test = datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": data_dir["test"]
                }
            )
            output.append(test)

        if "validation" in files:
            validation = datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": data_dir["validation"]
                }
            )
            output.append(validation)

        return output

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter=",", quotechar='"')
            for id_, row in enumerate(reader):
                # Skip the CSV header row.
                if id_ == 0:
                    continue
                # Columns are expected in the same order as the features
                # declared in _info().
                yield id_, {
                    "id": row[0],
                    "inputs": row[1],
                    "targets": row[2],
                    "dataset_name": row[3],
                    "sub_dataset_name": row[4],
                    "task_type": row[5],
                    "template_id": row[6],
                    "language_code": row[7],
                    "split": row[8],
                    "script": row[9],
                }
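

# Minimal usage sketch (not part of the loader itself). The repository id is a
# placeholder; substitute the hub path where this script is actually hosted and
# pick any subset name from _SUBSET_NAMES above.
#
#     from datasets import load_dataset
#     ds = load_dataset("<namespace>/<chinese-aya-collection>", name="translated_dolly")
#     print(ds["train"][0]["inputs"])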