Datasets:

Modalities:
Text
ArXiv:
Libraries:
Datasets
License:
himani committed on
Commit
5c55837
1 Parent(s): 281f0ba

Add dataset loading script

Browse files
Files changed (1) hide show
  1. IndicParaphrase.py +106 -0
IndicParaphrase.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+
3
+
4
+ import json
5
+ import os
6
+
7
+ import datasets
8
+
9
+ _CITATION = """\
10
+
11
+ """
12
+
13
+ _DESCRIPTION = """\
14
+
15
+
16
+ """
17
+ _HOMEPAGE = " "
18
+
19
+ _LICENSE = " "
20
+
21
+ _URL = "https://huggingface.co/datasets/ai4bharat/IndicParaphrase/resolve/main/data/{}_IndicParaphrase_v{}.tar.bz2"
22
+
23
+
24
+ _LANGUAGES = [
25
+ "as",
26
+ "bn",
27
+ "gu",
28
+ "hi",
29
+ "kn",
30
+ "ml",
31
+ "mr",
32
+ "or",
33
+ "pa",
34
+ "ta",
35
+ "te"
36
+ ]
37
+
38
+
39
+ class Xlsum(datasets.GeneratorBasedBuilder):
40
+ VERSION = datasets.Version("1.0.0")
41
+
42
+ BUILDER_CONFIGS = [
43
+ datasets.BuilderConfig(
44
+ name="{}".format(lang),
45
+ version=datasets.Version("1.0.0")
46
+ )
47
+ for lang in _LANGUAGES
48
+ ]
49
+
50
+ def _info(self):
51
+ return datasets.DatasetInfo(
52
+ description=_DESCRIPTION,
53
+ features=datasets.Features(
54
+ {
55
+ "id": datasets.Value("string"),
56
+ "input": datasets.Value("string"),
57
+ "target": datasets.Value("string"),
58
+ "references": [datasets.Value("string")]
59
+ }
60
+ ),
61
+ supervised_keys=None,
62
+ homepage=_HOMEPAGE,
63
+ citation=_CITATION,
64
+ license=_LICENSE,
65
+ version=self.VERSION,
66
+ )
67
+
68
+ def _split_generators(self, dl_manager):
69
+ """Returns SplitGenerators."""
70
+ lang = str(self.config.name)
71
+ url = _URL.format(lang, self.VERSION.version_str[:-2])
72
+
73
+ data_dir = dl_manager.download_and_extract(url)
74
+ return [
75
+ datasets.SplitGenerator(
76
+ name=datasets.Split.TRAIN,
77
+ gen_kwargs={
78
+ "filepath": os.path.join(data_dir, "train_" + lang + ".jsonl"),
79
+ },
80
+ ),
81
+ datasets.SplitGenerator(
82
+ name=datasets.Split.TEST,
83
+ gen_kwargs={
84
+ "filepath": os.path.join(data_dir, "test_" + lang + ".jsonl"),
85
+ },
86
+ ),
87
+ datasets.SplitGenerator(
88
+ name=datasets.Split.VALIDATION,
89
+ gen_kwargs={
90
+ "filepath": os.path.join(data_dir, "dev_" + lang + ".jsonl"),
91
+ },
92
+ ),
93
+ ]
94
+
95
+ def _generate_examples(self, filepath):
96
+ """Yields examples as (key, example) tuples."""
97
+ with open(filepath, encoding="utf-8") as f:
98
+ for idx_, row in enumerate(f):
99
+ data = json.loads(row)
100
+ yield idx_, {
101
+ "id": data["id"],
102
+ "input": data["input"],
103
+ "target": data["target"],
104
+ "references": data["references"]
105
+
106
+ }