Languages: Bengali
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: found
Annotations Creators: machine-generated
Source Datasets: extended
ArXiv: 2101.00204
License: cc-by-nc-sa-4.0
xnli_bn.py
"""XNLI Bengali dataset"""
import json
import os
import datasets
_CITATION = """\
@misc{bhattacharjee2021banglabert,
title={BanglaBERT: Combating Embedding Barrier in Multilingual Models for Low-Resource Language Understanding},
author={Abhik Bhattacharjee and Tahmid Hasan and Kazi Samin and Md Saiful Islam and M. Sohel Rahman and Anindya Iqbal and Rifat Shahriyar},
year={2021},
eprint={2101.00204},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
"""
_DESCRIPTION = """\
This is a Natural Language Inference (NLI) dataset for Bengali, curated using the subset of
MNLI data used in XNLI and state-of-the-art English to Bengali translation model.
"""
_HOMEPAGE = "https://github.com/csebuetnlp/banglabert"
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
_URL = "https://huggingface.co/datasets/csebuetnlp/xnli_bn/resolve/main/data/xnli_bn.tar.bz2"
_VERSION = datasets.Version("0.0.1")
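
# NOTE: the archive at _URL extracts to a directory named "xnli_bn/" containing
# train.jsonl, validation.jsonl and test.jsonl, which _split_generators below
# reads split by split.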

class XnliBn(datasets.GeneratorBasedBuilder):
    """XNLI Bengali dataset"""

    # A single default configuration; the dataset has no sub-configs.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="xnli_bn",
            version=_VERSION,
            description=_DESCRIPTION,
        )
    ]

    def _info(self):
        features = datasets.Features(
            {
                "sentence1": datasets.Value("string"),
                "sentence2": datasets.Value("string"),
                "label": datasets.features.ClassLabel(names=["contradiction", "entailment", "neutral"]),
            }
        )
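        # ClassLabel stores each label as an integer id assigned by position
        # in `names`: "contradiction" -> 0, "entailment" -> 1, "neutral" -> 2.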
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=_VERSION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # Download the archive once and resolve the extracted "xnli_bn/" directory.
        data_dir = os.path.join(dl_manager.download_and_extract(_URL), "xnli_bn")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "validation.jsonl"),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            for idx_, row in enumerate(f):
                # Each line of the JSONL file holds one example with
                # "sentence1", "sentence2" and "label" fields.
                data = json.loads(row)
                yield idx_, {"sentence1": data["sentence1"], "sentence2": data["sentence2"], "label": data["label"]}
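
Usage (a minimal sketch, not part of the loading script): assuming this script
and the data archive live in the csebuetnlp/xnli_bn repository referenced by
_URL above, the dataset can be loaded with the standard datasets API.

from datasets import load_dataset

# Downloads and extracts data/xnli_bn.tar.bz2 on first use,
# then builds the train/validation/test splits.
dataset = load_dataset("csebuetnlp/xnli_bn")

example = dataset["train"][0]
print(example["sentence1"], example["sentence2"])

# "label" is stored as a ClassLabel integer; map it back to its name.
label_feature = dataset["train"].features["label"]
print(label_feature.int2str(example["label"]))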