# -*- coding: utf-8 -*-
"""dataset.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1sNXmgV-J4w6JSdtXK-0TckTP5SFkGPtz
###Create a file.py
"""
import csv

import datasets

_URLS = {
    "zh-en": {
        "TRAIN_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1z-reeSB_pAcZEJicRpBJWzrhuwdtJ-d1&export=download",
        "VALIDATION_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1f1izEby8pfXZWG7htvcky_FL2iTMnoD5&export=download",
        "TEST_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1VGM96MZvMuAPJoFzBeSpyC16IDSiu0vC&export=download",
    }
}
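
# Each split is a UTF-8 CSV file read with csv.DictReader in
# _generate_examples below, so the header row must contain "en" and "zh"
# columns. A minimal sketch of the expected layout (the rows below are
# illustrative only, not taken from the actual files):
#
#   en,zh
#   "Good morning.","早安。"
#   "Thank you very much.","非常感謝。"
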
class NewDataset(datasets.GeneratorBasedBuilder):
    """Traditional Chinese / English translation dataset."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="zh-en",
            version=VERSION,
            description="Translation dataset between Traditional Chinese and English",
        )
    ]
    def _info(self):
        if self.config.name == "zh-en":  # name of the configuration selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {"translation": datasets.features.Translation(languages=["en", "zh"])}
            )
        return datasets.DatasetInfo(
            # description=_DESCRIPTION,  # description shown on the dataset page
            features=features,  # column names and types, defined per configuration above
            # There is no single (input, target) column pair, so supervised_keys
            # stays None; it would only be used with as_supervised=True in
            # builder.as_dataset().
            supervised_keys=None,
            # homepage=_HOMEPAGE,
            # license=_LICENSE,
            # citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, validation, and test CSV files."""
        my_urls = _URLS[self.config.name]
        train_path = dl_manager.download_and_extract(my_urls["TRAIN_DOWNLOAD_URL"])
        validation_path = dl_manager.download_and_extract(my_urls["VALIDATION_DOWNLOAD_URL"])
        test_path = dl_manager.download_and_extract(my_urls["TEST_DOWNLOAD_URL"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": validation_path, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_path, "split": "test"},
            ),
        ]
    def _generate_examples(self, filepath, split):
        """Yields (id, example) pairs of English / Traditional Chinese translations."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for idx, row in enumerate(reader):
                if self.config.name == "zh-en":
                    result = {"translation": {"en": row["en"], "zh": row["zh"]}}
                    yield idx, result
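
# A minimal local smoke test, not part of the loading script proper. It assumes
# this file is saved as "dataset.py", is run from the same directory, and that
# the installed `datasets` version still supports local loading scripts;
# load_dataset will then download the Google Drive CSVs declared in _URLS.
if __name__ == "__main__":
    ds = datasets.load_dataset("dataset.py", name="zh-en")
    print(ds)              # DatasetDict with train / validation / test splits
    print(ds["train"][0])  # {"translation": {"en": "...", "zh": "..."}}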