Datasets:

Languages:
Bengali
Multilinguality:
monolingual
Size Categories:
100k<n<1M
Language Creators:
found
Annotations Creators:
found
Source Datasets:
original
ArXiv:
License:
Shukti committed on
Commit
1c09063
1 Parent(s): ed154f9

add data loader and restructure data

Browse files
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ **/.DS_Store
BanglaParaphrase.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import os
3
+
4
+ import datasets
5
+
6
# BibTeX citation for the dataset; placeholder until the paper's entry is published.
_CITATION = """
to be added
"""

# Human-readable summary surfaced on the dataset hub page.
_DESCRIPTION = """\
We present a high quality bangla paraphrase dataset containing about 466k paraphrase pairs. The paraphrases ensures high quality by being semantically coherent and syntactically diverse.

"""
# Upstream project repository for the dataset.
_HOMEPAGE = "https://github.com/csebuetnlp/banglaparaphrase"

_LICENSE = "Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License (CC BY-NC-SA 4.0)"

# Zip archive hosted on the Hugging Face hub; presumably contains the
# train/test/validation JSONL files referenced by the builder — TODO confirm
# the archive layout matches the os.path.join paths used below.
_URL = "https://huggingface.co/datasets/csebuetnlp/BanglaParaphrase/resolve/main/data/BanglaParaphrase.zip"


# Language codes covered by this dataset (Bengali only); matches the single
# "bn" builder config declared on the class below.
_LANGUAGES = [
    "bn"
]
24
+
25
+
26
class IndicParaphrase(datasets.GeneratorBasedBuilder):
    """Builder for the BanglaParaphrase corpus of Bengali paraphrase pairs.

    Each example is a pair of plain strings, ``source`` and ``target``,
    read line-by-line from JSONL files inside the downloaded archive.
    """
    # NOTE(review): the class name reads "IndicParaphrase" while this file
    # loads BanglaParaphrase — presumably a copy-paste leftover from a
    # sibling loader. Left unchanged since external code may reference
    # this identifier; confirm before renaming.

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="bn",
            version=datasets.Version("1.0.0"),
        )
    ]

    def _info(self):
        """Return the dataset metadata: two string features per example."""
        feature_spec = datasets.Features(
            {
                "source": datasets.Value("string"),
                "target": datasets.Value("string"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=feature_spec,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive once and map each split to its JSONL file."""
        extracted_root = dl_manager.download_and_extract(_URL)
        # One (split, filename) pair per generator, in train/test/validation order.
        split_files = (
            (datasets.Split.TRAIN, "train.jsonl"),
            (datasets.Split.TEST, "test.jsonl"),
            (datasets.Split.VALIDATION, "validation.jsonl"),
        )
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={"filepath": os.path.join(extracted_root, file_name)},
            )
            for split_name, file_name in split_files
        ]

    def _generate_examples(self, filepath):
        """Yield (line_index, example) tuples, one JSON object per input line."""
        with open(filepath, encoding="utf-8") as handle:
            for line_no, line in enumerate(handle):
                record = json.loads(line)
                yield line_no, {
                    "source": record["source"],
                    "target": record["target"],
                }
train.jsonl → data/BanglaParaphrase.zip RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:0701202e4d43eb5d27826e3b65069ea60a2fde5b4bd1bf8461d752bb2f6d62ba
3
- size 179718093
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f27c967bcb75620fa2c94fb34159795f680df3b28b45f9f40b88a1bf1573a6f
3
+ size 38002116
test.jsonl DELETED
The diff for this file is too large to render. See raw diff
 
validation.jsonl DELETED
The diff for this file is too large to render. See raw diff