# coding=utf-8
# Lint as: python3
"""IndicXNLI: The Cross-Lingual NLI Corpus for Indic Languages."""
import json

import datasets

_CITATION = """\
@misc{aggarwal2023evaluating,
title={Evaluating Inter-Bilingual Semantic Parsing for Indian Languages},
author={Divyanshu Aggarwal and Vivek Gupta and Anoop Kunchukuttan},
year={2023},
eprint={2304.13005},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""

_DESCRIPTION = """\
IE-SemParse is an inter-bilingual seq2seq semantic parsing dataset covering 11 distinct Indian languages.
"""

_LANGUAGES = (
    'hi',
    'bn',
    'mr',
    'as',
    'ta',
    'te',
    'or',
    'ml',
    'pa',
    'gu',
    'kn'
)

_DATASETS = (
    'itop',
    'indic-atis',
    'indic-TOP'
)

# Human-readable corpus names for each dataset key
# (defined for reference; not used by the loading logic below).
mapping = {
    "itop": "IE-mTOP",
    "indic-atis": "IE-ATIS",
    "indic-TOP": "IE-multilingualTOP"
}

_URL = "https://huggingface.co/datasets/Divyanshu/IE_SemParse/resolve/main/"


class IE_SemParseConfig(datasets.BuilderConfig):
    """BuilderConfig for IE-SemParse."""

    def __init__(self, dataset: str, language: str, **kwargs):
        """BuilderConfig for IE-SemParse.

        Args:
          dataset: One of itop, indic-atis, indic-TOP
          language: One of hi, bn, mr, as, ta, te, or, ml, pa, gu, kn
          **kwargs: keyword arguments forwarded to super.
        """
        super(IE_SemParseConfig, self).__init__(**kwargs)
        self.dataset = dataset
        self.language = language
        self.languages = _LANGUAGES
        self.datasets = _DATASETS
        # Join with "/" explicitly: os.path.join would produce backslashes
        # on Windows and break the download URL.
        self._URLS = [f"{_URL}unfiltered_data/{dataset}/{language}.json"]


class IE_SemParse(datasets.GeneratorBasedBuilder):
    """IE-SemParse: Inter-Bilingual Semantic Parsing Dataset for Indic Languages. Version 1.0."""

    VERSION = datasets.Version("1.0.0", "")
    BUILDER_CONFIG_CLASS = IE_SemParseConfig
    BUILDER_CONFIGS = [
        IE_SemParseConfig(
            name=f"{dataset}_{language}",
            language=language,
            dataset=dataset,
            version=datasets.Version("1.0.0", ""),
            description=f"Plain text import of IE-SemParse for the {language} language of the {dataset} dataset",
        )
        # One config per (dataset, language) combination; zip() would only
        # pair the first three entries instead of covering all 33 combinations.
        for dataset in _DATASETS
        for language in _LANGUAGES
    ]

    def _info(self):
        # Download the configured split file eagerly and infer the feature
        # schema (all columns are plain strings) from the first training row.
        dl_manager = datasets.DownloadManager()
        urls_to_download = self.config._URLS
        filepath = dl_manager.download_and_extract(urls_to_download)[0]

        with open(filepath, "r") as f:
            data = json.load(f)

        features = datasets.Features(
            {k: datasets.Value("string") for k in data["train"][0].keys()}
        )

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # No default supervised_keys: the raw string fields are emitted
            # as-is rather than as a fixed (input, target) pair.
            supervised_keys=None,
            homepage="https://github.com/divyanshuaggarwal/IE-SemParse",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        urls_to_download = self.config._URLS
        downloaded_file = dl_manager.download_and_extract(urls_to_download)[0]

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split_key": "train",
                    "filepath": downloaded_file,
                    "data_format": "IE-SemParse"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split_key": "test",
                    "filepath": downloaded_file,
                    "data_format": "IE-SemParse"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split_key": "val",
                    "filepath": downloaded_file,
                    "data_format": "IE-SemParse"
                },
            ),
        ]

    def _generate_examples(self, data_format, split_key, filepath):
        """This function returns the examples in the raw (text) form."""
        with open(filepath, "r") as f:
            data = json.load(f)

        data = data[split_key]

        for idx, row in enumerate(data):
            yield idx, {k: v for k, v in row.items()}
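

# A minimal usage sketch, not part of the loader itself: it assumes this
# script is published as the Divyanshu/IE_SemParse dataset repository
# referenced in _URL, so a "<dataset>_<language>" config such as "itop_hi"
# can be loaded directly. Column names are inferred at load time, so only
# the keys of the first training example are printed here.
if __name__ == "__main__":
    ds = datasets.load_dataset("Divyanshu/IE_SemParse", "itop_hi")
    print(ds)
    print(sorted(ds["train"][0].keys()))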