sahuPrachi committed on
Commit
9539ef8
1 Parent(s): eac21fd

data loader script

Files changed (1)
  1. IndicHeadlineGeneration.py +112 -0
IndicHeadlineGeneration.py ADDED
@@ -0,0 +1,112 @@
import json
import os

import datasets

_CITATION = """\
@inproceedings{Kumar2022IndicNLGSM,
  title={IndicNLG Suite: Multilingual Datasets for Diverse NLG Tasks in Indic Languages},
  author={Aman Kumar and Himani Shrotriya and Prachi Sahu and Raj Dabre and Ratish Puduppully and Anoop Kunchukuttan and Amogh Mishra and Mitesh M. Khapra and Pratyush Kumar},
  year={2022},
  url = "https://arxiv.org/abs/2203.05437"
}
"""

_DESCRIPTION = """\
This is the new headline generation dataset released as part of IndicNLG Suite. Each
input document is paired with an output title. We create this dataset in eleven
languages: as, bn, gu, hi, kn, ml, mr, or, pa, ta, te. The total size of the
dataset is 1.43M examples.
"""

_HOMEPAGE = "https://indicnlp.ai4bharat.org/indicnlg-suite"

_LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International Public License"

_URL = "https://huggingface.co/datasets/ai4bharat/IndicHeadlineGeneration/resolve/main/data/{}_IndicHeadlineGeneration_v{}.tar.bz2"
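# Note: VERSION.version_str is "1.0.0", so version_str[:-2] below yields "1.0" and
# e.g. the "hi" config downloads .../hi_IndicHeadlineGeneration_v1.0.tar.bz2.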

_LANGUAGES = [
    "as",
    "bn",
    "gu",
    "hi",
    "kn",
    "ml",
    "mr",
    "or",
    "pa",
    "ta",
    "te",
]


class IndicHeadlineGeneration(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="{}".format(lang),
            version=datasets.Version("1.0.0"),
        )
        for lang in _LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "input": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        lang = str(self.config.name)
        url = _URL.format(lang, self.VERSION.version_str[:-2])

        data_dir = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang + "_train.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang + "_test.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, lang + "_dev.jsonl"),
                },
            ),
        ]
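
    # Each line of the extracted JSONL files is assumed to be one JSON object with
    # the fields consumed below, e.g. (illustrative values, not real data):
    #   {"id": "0", "Document": "<article text>", "Title": "<headline>", "URL": "https://..."}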
    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            for idx_, row in enumerate(f):
                data = json.loads(row)
                yield idx_, {
                    "id": data["id"],
                    "input": data["Document"],
                    "target": data["Title"],
                    "url": data["URL"],
                }
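
For reference, a minimal sketch of how this loader would typically be invoked once the script is on the Hub. The config name is one of the language codes in _LANGUAGES; recent versions of the datasets library may additionally require trust_remote_code=True for script-based loaders.

from datasets import load_dataset

# Load the Hindi configuration; the train/validation/test splits come from
# hi_train.jsonl, hi_dev.jsonl and hi_test.jsonl inside the downloaded archive.
dataset = load_dataset("ai4bharat/IndicHeadlineGeneration", "hi")

sample = dataset["train"][0]
print(sample["input"])   # source document
print(sample["target"])  # reference headline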