IndicParaphrase / IndicParaphrase.py
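# Hugging Face `datasets` loading script for AI4Bharat's IndicParaphrase corpus.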
import json
import os
import datasets
_CITATION = """\
"""
_DESCRIPTION = """\
"""
_HOMEPAGE = " "
_LICENSE = " "
_URL = "https://huggingface.co/datasets/ai4bharat/IndicParaphrase/resolve/main/data/{}_IndicParaphrase_v{}.tar.bz2"
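# With VERSION "1.0.0" below, version_str[:-2] yields "1.0", so the URL resolves to,
# e.g. for Hindi: .../data/hi_IndicParaphrase_v1.0.tar.bz2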
# ISO 639-1 codes for the eleven languages covered by the dataset.
_LANGUAGES = [
    "as",
    "bn",
    "gu",
    "hi",
    "kn",
    "ml",
    "mr",
    "or",
    "pa",
    "ta",
    "te",
]
class IndicParaphrase(datasets.GeneratorBasedBuilder):
    """IndicParaphrase: one builder config per language code in _LANGUAGES."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version("1.0.0"),
        )
        for lang in _LANGUAGES
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "input": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "references": [datasets.Value("string")],
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=self.VERSION,
        )
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        lang = str(self.config.name)
        url = _URL.format(lang, self.VERSION.version_str[:-2])
        data_dir = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train_" + lang + ".jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test_" + lang + ".jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "dev_" + lang + ".jsonl"),
                },
            ),
        ]
    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            for idx_, row in enumerate(f):
                data = json.loads(row)
                # The line number serves as the example key; "id" stays in the row.
                yield idx_, {
                    "id": data["id"],
                    "input": data["input"],
                    "target": data["target"],
                    "references": data["references"],
                }
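
A minimal usage sketch, for reference. It assumes the script is served from the Hub repo named in _URL (ai4bharat/IndicParaphrase), and the config name must be one of the codes in _LANGUAGES; depending on your datasets version, passing trust_remote_code=True may also be required to execute the script.

    from datasets import load_dataset

    # Load the Hindi configuration; any code from _LANGUAGES works here.
    dataset = load_dataset("ai4bharat/IndicParaphrase", "hi")

    print(dataset)              # DatasetDict with train, validation, and test splits
    print(dataset["train"][0])  # keys: "id", "input", "target", "references"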