Datasets: csebuetnlp/xnli_bn
Languages: Bengali
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: found
Annotations Creators: machine-generated
Source Datasets: extended
ArXiv: 2101.00204
License: cc-by-nc-sa-4.0
abhik1505040 committed on
Commit 2ec4604
1 Parent(s): cfbe5c2

Initial commit

Files changed (3)
  1. README.md +0 -0
  2. data/xnli_bn.tar.bz2 +3 -0
  3. xnli_bn.py +79 -0
README.md ADDED
(empty file)
data/xnli_bn.tar.bz2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a91b4d3f8433a98fd6251396976b17b2385ef49ffbb207fabe8124fc6b066207
+ size 21437836
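The pointer above stands in for the actual archive in Git history: oid is the SHA-256 digest of the real tarball and size is its byte count. A minimal verification sketch, assuming the archive has already been downloaded locally (the path below is hypothetical):

import hashlib
import os

# Hypothetical local path to the downloaded archive.
path = "data/xnli_bn.tar.bz2"

sha = hashlib.sha256()
with open(path, "rb") as f:
    # Hash in 1 MiB chunks to avoid reading the ~21 MB file at once.
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == 21437836  # "size" from the pointer
assert sha.hexdigest() == "a91b4d3f8433a98fd6251396976b17b2385ef49ffbb207fabe8124fc6b066207"  # "oid"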
xnli_bn.py ADDED
@@ -0,0 +1,79 @@
+ """XNLI Bengali dataset"""
+ import json
+ import os
+ import datasets
+ _CITATION = """\
+ @misc{bhattacharjee2021banglabert,
+   title={BanglaBERT: Combating Embedding Barrier in Multilingual Models for Low-Resource Language Understanding},
+   author={Abhik Bhattacharjee and Tahmid Hasan and Kazi Samin and Md Saiful Islam and M. Sohel Rahman and Anindya Iqbal and Rifat Shahriyar},
+   year={2021},
+   eprint={2101.00204},
+   archivePrefix={arXiv},
+   primaryClass={cs.CL}
+ }
+ """
+ _DESCRIPTION = """\
+ This is a Natural Language Inference (NLI) dataset for Bengali, curated using the subset of
+ MNLI data used in XNLI and a state-of-the-art English-to-Bengali translation model.
+ """
+ _HOMEPAGE = "https://github.com/csebuetnlp/banglabert"
+ _LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
+ _URL = "https://huggingface.co/datasets/csebuetnlp/xnli_bn/resolve/main/data/xnli_bn.tar.bz2"
+ _VERSION = datasets.Version("0.0.1")
+ 
+ class XnliBn(datasets.GeneratorBasedBuilder):
+     """XNLI Bengali dataset"""
+ 
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "sentence1": datasets.Value("string"),
+                 "sentence2": datasets.Value("string"),
+                 "label": datasets.features.ClassLabel(
+                     names=["contradiction", "entailment", "neutral"]
+                 ),
+             }
+         )
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+             version=_VERSION,
+         )
+ 
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # Download the tarball and get the path of the extracted directory.
+         data_dir = dl_manager.download_and_extract(_URL)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "train.jsonl"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "test.jsonl"),
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "validation.jsonl"),
+                 },
+             ),
+         ]
+ 
+     def _generate_examples(self, filepath):
+         """Yields examples as (key, example) tuples."""
+         with open(filepath, encoding="utf-8") as f:
+             # Each line of the .jsonl file holds a single JSON record.
+             for idx_, row in enumerate(f):
+                 data = json.loads(row)
+                 yield idx_, {
+                     "sentence1": data["sentence1"],
+                     "sentence2": data["sentence2"],
+                     "label": data["label"],
+                 }
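For reference, a minimal usage sketch of the finished loader, assuming the repository is published on the Hub as csebuetnlp/xnli_bn (consistent with _URL above):

from datasets import load_dataset

# Downloads the archive via xnli_bn.py and builds all three splits.
ds = load_dataset("csebuetnlp/xnli_bn")

print(ds["train"][0])  # {'sentence1': ..., 'sentence2': ..., 'label': ...}

# The ClassLabel feature maps the integer labels back to their string names.
print(ds["train"].features["label"].names)  # ['contradiction', 'entailment', 'neutral']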