import json
import os

import datasets

_CITATION = """\
@inproceedings{Kumar2022IndicNLGSM,
  title={IndicNLG Suite: Multilingual Datasets for Diverse NLG Tasks in Indic Languages},
  author={Aman Kumar and Himani Shrotriya and Prachi Sahu and Raj Dabre and Ratish Puduppully and Anoop Kunchukuttan and Amogh Mishra and Mitesh M. Khapra and Pratyush Kumar},
  year={2022},
  url = "https://arxiv.org/abs/2203.05437"
}
"""

_DESCRIPTION = """\
This is the sentence summarization dataset released as part of the IndicNLG Suite. Each
input sentence is paired with an output summary. The dataset covers eleven languages:
as, bn, gu, hi, kn, ml, mr, or, pa, ta, and te. The total size of the dataset is 431K
examples.
"""
_HOMEPAGE = "https://indicnlp.ai4bharat.org/indicnlg-suite"

_LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International Public License"

_URL = "https://huggingface.co/datasets/ai4bharat/IndicSentenceSummarization/resolve/main/data/{}_IndicSentenceSummarization_v{}.zip"
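# Illustrative example (assumption: config "hi", version 1.0): the template above
# resolves to
# https://huggingface.co/datasets/ai4bharat/IndicSentenceSummarization/resolve/main/data/hi_IndicSentenceSummarization_v1.0.zip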


_LANGUAGES = [
    "as",
    "bn",
    "gu",
    "hi",
    "kn",
    "ml",
    "mr",
    "or",
    "pa",
    "ta",
    "te"
]
    

class IndicSentenceSummarization(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    # One BuilderConfig per language; the config name selects which archive to download.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            version=datasets.Version("1.0.0"),
        )
        for lang in _LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "input": datasets.Value("string"),
                    "target": datasets.Value("string"),
                    "url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, test, and validation splits."""
        lang = str(self.config.name)
        # version_str is e.g. "1.0.0"; dropping the last two characters yields "1.0",
        # matching the "_v1.0" suffix in the archive filename.
        url = _URL.format(lang, self.VERSION.version_str[:-2])

        data_dir = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "content", lang + "_train.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "content", lang + "_test.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "content", lang + "_dev.jsonl"),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples."""
        with open(filepath, encoding="utf-8") as f:
            for idx_, row in enumerate(f):
                data = json.loads(row)
                yield idx_, {
                    "id": data["id"],
                    "input": data["Sentence"],
                    "target": data["Summary"],
                    "url": data["URL"],
                }
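

# Minimal usage sketch (illustrative; assumes this script is hosted on the
# Hugging Face Hub as "ai4bharat/IndicSentenceSummarization", per _URL above):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("ai4bharat/IndicSentenceSummarization", "hi")
#     print(ds["train"][0])  # {"id": ..., "input": ..., "target": ..., "url": ...}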