# coding=utf-8
# Lint as: python3
"""IE-SemParse: Inter-Bilingual Semantic Parsing Dataset for Indic Languages."""

import json
import os

import datasets

_CITATION = """\
@misc{aggarwal2023evaluating,
title={Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
author={Divyanshu Aggarwal and Vivek Gupta and Anoop Kunchukuttan},
year={2023},
eprint={2304.13005},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
IE-SemParse is an inter-bilingual seq2seq semantic parsing dataset for 11 distinct Indian languages.
"""

_LANGUAGES = (
    "hi",
    "bn",
    "mr",
    "as",
    "ta",
    "te",
    "or",
    "ml",
    "pa",
    "gu",
    "kn",
)

_DATASETS = (
    "itop",
    "indic-atis",
    "indic-TOP",
)

_URL = "https://huggingface.co/datasets/Divyanshu/IE-SemParse/resolve/main/"
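
# Each config downloads a single JSON file resolved as
# _URL + "unfiltered_data/{dataset}/{language}.json" (see IESemParseConfig._URLS below).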


class IESemParseConfig(datasets.BuilderConfig):
    """BuilderConfig for IE-SemParse."""

    def __init__(self, dataset: str, language: str, **kwargs):
        """BuilderConfig for IE-SemParse.

        Args:
          dataset: One of itop, indic-atis, indic-TOP
          language: One of hi, bn, mr, as, ta, te, or, ml, pa, gu, kn
          **kwargs: Keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.dataset = dataset
        self.language = language
        self.languages = _LANGUAGES
        self.datasets = _DATASETS
        self._URLS = [
            os.path.join(_URL, "unfiltered_data", dataset, f"{language}.json")
        ]


class IESemParse(datasets.GeneratorBasedBuilder):
    """IE-SemParse: Inter-Bilingual Semantic Parsing Dataset for Indic Languages. Version 1.0."""

    VERSION = datasets.Version("1.0.0", "")
    BUILDER_CONFIG_CLASS = IESemParseConfig
    # One config per (dataset, language) pair, e.g. "itop_hi".
    BUILDER_CONFIGS = [
        IESemParseConfig(
            name=f"{dataset}_{language}",
            language=language,
            dataset=dataset,
            version=datasets.Version("1.0.0", ""),
            description=f"Plain text import of IE-SemParse for the {language} language of the {dataset} dataset",
        )
        for dataset in _DATASETS
        for language in _LANGUAGES
    ]

    def _info(self):
        # Download one data file up front so the feature schema can be
        # inferred from the keys of the first example; every field is
        # treated as a plain string.
        dl_manager = datasets.DownloadManager()
        urls_to_download = self.config._URLS
        filepath = dl_manager.download_and_extract(urls_to_download)[0]

        with open(filepath, "r") as f:
            data = json.load(f)

        data = data[list(data.keys())[0]]
        features = datasets.Features(
            {k: datasets.Value("string") for k in data[0].keys()}
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No default supervised_keys; the feature schema is inferred
            # from the JSON keys above.
            supervised_keys=None,
            homepage="https://github.com/divyanshuaggarwal/IE-SemParse",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls_to_download = self.config._URLS
        downloaded_file = dl_manager.download_and_extract(urls_to_download)[0]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split_key": "train",
                    "filepath": downloaded_file,
                    "data_format": "IE-SemParse",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split_key": "test",
                    "filepath": downloaded_file,
                    "data_format": "IE-SemParse",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split_key": "val",
                    "filepath": downloaded_file,
                    "data_format": "IE-SemParse",
                },
            ),
        ]

    def _generate_examples(self, split_key, data_format, filepath):
        """This function returns the examples in the raw (text) form."""
        with open(filepath, "r") as f:
            data = json.load(f)

        # Each data file maps split names ("train"/"test"/"val") to lists of examples.
        data = data[split_key]

        for idx, row in enumerate(data):
            yield idx, {k: v for k, v in row.items()}
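

if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): load the "itop_hi" config,
    # i.e. the itop dataset in Hindi, following the f"{dataset}_{language}"
    # config naming in BUILDER_CONFIGS above. Recent versions of `datasets`
    # may additionally require trust_remote_code=True for script-based datasets.
    dataset = datasets.load_dataset("Divyanshu/IE-SemParse", "itop_hi")
    print(dataset)
    print(dataset["train"][0])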