Datasets: csebuetnlp/dailydialogue_bn
Languages: Bengali
Multilinguality: monolingual
Size Categories: 100K<n<1M
Language Creators: found
Annotations Creators: machine-generated
Source Datasets: extended
License: cc-by-nc-sa-4.0
abhik1505040 committed
Commit: b3c2c81
Parent(s): 5cf04b2

Create dailydialogue_bn.py

Files changed (1):
  1. dailydialogue_bn.py +98 -0

dailydialogue_bn.py ADDED
@@ -0,0 +1,98 @@
"""DailyDialogue Bengali Dataset"""

import os
import json

import datasets

_CITATION = """\
@inproceedings{bhattacharjee-etal-2023-banglanlg,
    title = "{B}angla{NLG} and {B}angla{T}5: Benchmarks and Resources for Evaluating Low-Resource Natural Language Generation in {B}angla",
    author = "Bhattacharjee, Abhik and
      Hasan, Tahmid and
      Ahmad, Wasi Uddin and
      Shahriyar, Rifat",
    booktitle = "Findings of the Association for Computational Linguistics: EACL 2023",
    month = may,
    year = "2023",
    address = "Dubrovnik, Croatia",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2023.findings-eacl.54",
    pages = "726--735",
    abstract = "This work presents {`}BanglaNLG,{'} a comprehensive benchmark for evaluating natural language generation (NLG) models in Bangla, a widely spoken yet low-resource language. We aggregate six challenging conditional text generation tasks under the BanglaNLG benchmark, introducing a new dataset on dialogue generation in the process. Furthermore, using a clean corpus of 27.5 GB of Bangla data, we pretrain {`}BanglaT5{'}, a sequence-to-sequence Transformer language model for Bangla. BanglaT5 achieves state-of-the-art performance in all of these tasks, outperforming several multilingual models by up to 9{\%} absolute gain and 32{\%} relative gain. We are making the new dialogue dataset and the BanglaT5 model publicly available at https://github.com/csebuetnlp/BanglaNLG in the hope of advancing future research on Bangla NLG.",
}
"""

_DESCRIPTION = """\
DailyDialogue (Bengali) has been derived from the original English dataset.
"""

_HOMEPAGE = "https://github.com/csebuetnlp/BanglaNLG"
_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"
_URL = "https://huggingface.co/datasets/csebuetnlp/dailydialogue_bn/resolve/main/data/dailydialogue_bn.tar.bz2"
_VERSION = datasets.Version("0.0.1")


class DailydialogueBn(datasets.GeneratorBasedBuilder):
    """DailyDialogue Bengali Dataset"""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="dailydialogue_bn",
            version=_VERSION,
            description=_DESCRIPTION,
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "dialogue": datasets.features.Sequence(
                        datasets.Value("string")
                    ),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dir = os.path.join(dl_manager.download_and_extract(_URL), "dailydialogue_bn")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "validation.jsonl"),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            for i, line in enumerate(f):
                # Each JSONL line is an object whose "source" key holds the
                # list of dialogue turns for one conversation.
                data = json.loads(line.strip())["source"]
                yield i, {
                    "id": str(i),
                    "dialogue": data,
                }
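
For reference, a minimal usage sketch (not part of the commit) of how this builder can be consumed through the datasets library. The repository id csebuetnlp/dailydialogue_bn is taken from _URL above, and the field names follow the features declared in _info(); recent versions of datasets require trust_remote_code=True for script-based datasets like this one.

from datasets import load_dataset

# Downloads dailydialogue_bn.tar.bz2 and runs the builder defined above.
ds = load_dataset("csebuetnlp/dailydialogue_bn", trust_remote_code=True)

# Each example carries a string "id" and a "dialogue" sequence of utterance strings.
sample = ds["train"][0]
print(sample["id"])
for turn in sample["dialogue"]:
    print(turn)

The train, test, and validation splits map one-to-one to the three .jsonl files that _split_generators reads from the extracted archive.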