# -*- coding: utf-8 -*-
"""dataset.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1sNXmgV-J4w6JSdtXK-0TckTP5SFkGPtz
### Create a file.py
"""
import datasets
import csv
import pandas as pd
_URLS = {
    "zh-en": {
        "TRAIN_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1z-reeSB_pAcZEJicRpBJWzrhuwdtJ-d1&export=download",
        "VALIDATION_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1f1izEby8pfXZWG7htvcky_FL2iTMnoD5&export=download",
        "TEST_DOWNLOAD_URL": "https://drive.google.com/u/0/uc?id=1VGM96MZvMuAPJoFzBeSpyC16IDSiu0vC&export=download",
    }
}
class NewDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="zh-en",
            version=VERSION,
            description="The translation dataset between Traditional Chinese and English",
        )
    ]
    def _info(self):
        if self.config.name == "zh-en":  # The configuration name selected in BUILDER_CONFIGS above
            features = datasets.Features(
                {"translation": datasets.features.Translation(languages=["en", "zh"])}
            )
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            # description=_DESCRIPTION,
            # This defines the columns of the dataset and their types;
            # the features are defined above, per configuration.
            features=features,
            # If there's a common (input, target) tuple from the features,
            # specify them here. They'll be used if as_supervised=True in
            # builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation
            # homepage=_HOMEPAGE,
            # License for the dataset if available
            # license=_LICENSE,
            # Citation for the dataset
            # citation=_CITATION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        my_urls = _URLS[self.config.name]
        train_path = dl_manager.download_and_extract(my_urls["TRAIN_DOWNLOAD_URL"])
        validation_path = dl_manager.download_and_extract(my_urls["VALIDATION_DOWNLOAD_URL"])
        test_path = dl_manager.download_and_extract(my_urls["TEST_DOWNLOAD_URL"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_path, "split": "train"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": validation_path, "split": "validation"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_path, "split": "test"},
            ),
        ]
    def _generate_examples(self, filepath, split):
        """Yields zh-en translation examples from a CSV file with 'en' and 'zh' columns."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for idx, row in enumerate(reader):
                if self.config.name == "zh-en":
                    result = {"translation": {"en": row["en"], "zh": row["zh"]}}
                    yield idx, result
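
# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the generated script): assuming this file
# is saved locally as dataset.py, the builder above can be exercised with
# datasets.load_dataset by pointing it at the script path and the "zh-en"
# config. The local filename is an assumption, and recent versions of the
# datasets library may require trust_remote_code=True (or no longer support
# script-based loading at all), so treat this as illustrative only.
if __name__ == "__main__":
    dataset = datasets.load_dataset("dataset.py", "zh-en")
    print(dataset)
    # Each example is a dict like {"translation": {"en": "...", "zh": "..."}}
    print(dataset["train"][0])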