krsnaman committed
Commit 47c33f4
Parent: 925b891

Update IndicWikiBio.py
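
The effective change is a single line: `_URL` now points at repacked `.zip` archives named `{lang}_WikiBio_v{version}.zip` rather than the earlier `{lang}_IndicWikiBio_v{version}.tar.bz2` tarballs. As an illustrative sanity check (not part of the commit), here is what the new template resolves to for the `hi` config, given that `_split_generators` fills it with the config name and the truncated version string:

```python
_URL = "https://huggingface.co/datasets/ai4bharat/IndicWikiBio/resolve/main/data/{}_WikiBio_v{}.zip"

# _split_generators passes the config name and VERSION.version_str[:-2],
# i.e. "1.0" for version 1.0.0.
print(_URL.format("hi", "1.0"))
# https://huggingface.co/datasets/ai4bharat/IndicWikiBio/resolve/main/data/hi_WikiBio_v1.0.zip
```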

Files changed (1)
  1. IndicWikiBio.py +108 -108
IndicWikiBio.py CHANGED
@@ -1,108 +1,108 @@
 import json
 import os
 
 import datasets
 
 _CITATION = """\
 @inproceedings{Kumar2022IndicNLGSM,
   title={IndicNLG Suite: Multilingual Datasets for Diverse NLG Tasks in Indic Languages},
   author={Aman Kumar and Himani Shrotriya and Prachi Sahu and Raj Dabre and Ratish Puduppully and Anoop Kunchukuttan and Amogh Mishra and Mitesh M. Khapra and Pratyush Kumar},
   year={2022},
   url = "https://arxiv.org/abs/2203.05437"
 }
 """
 
 _DESCRIPTION = """\
 This is the WikiBio dataset released as part of IndicNLG Suite. Each
 example has four fields: id, infobox, serialized infobox and summary. We create this dataset in nine
 languages including as, bn, hi, kn, ml, or, pa, ta, te. The total
 size of the dataset is 57,426.
 """
 _HOMEPAGE = "https://indicnlp.ai4bharat.org/indicnlg-suite"
 
 _LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International Public License"
 
-_URL = "https://huggingface.co/datasets/ai4bharat/IndicWikiBio/resolve/main/data/{}_IndicWikiBio_v{}.tar.bz2"
+_URL = "https://huggingface.co/datasets/ai4bharat/IndicWikiBio/resolve/main/data/{}_WikiBio_v{}.zip"
 
 
 _LANGUAGES = [
     "as",
     "bn",
     "hi",
     "kn",
     "ml",
     "or",
     "pa",
     "ta",
     "te"
 ]
 
 
 class WikiBio(datasets.GeneratorBasedBuilder):
     VERSION = datasets.Version("1.0.0")
 
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
             name="{}".format(lang),
             version=datasets.Version("1.0.0")
         )
         for lang in _LANGUAGES
     ]
 
     def _info(self):
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=datasets.Features(
                 {
                     "id": datasets.Value("string"),
                     "infobox": datasets.Value("string"),
                     "serialized_infobox": datasets.Value("string"),
                     "summary": datasets.Value("string")
                 }
             ),
             supervised_keys=None,
             homepage=_HOMEPAGE,
             citation=_CITATION,
             license=_LICENSE,
             version=self.VERSION,
         )
 
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         lang = str(self.config.name)
         url = _URL.format(lang, self.VERSION.version_str[:-2])
 
         data_dir = dl_manager.download_and_extract(url)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
                     "filepath": os.path.join(data_dir, lang + "_train" + ".jsonl"),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 gen_kwargs={
                     "filepath": os.path.join(data_dir, lang + "_test" + ".jsonl"),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
                     "filepath": os.path.join(data_dir, lang + "_val" + ".jsonl"),
                 },
             ),
         ]
 
     def _generate_examples(self, filepath):
         """Yields examples as (key, example) tuples."""
         with open(filepath, encoding="utf-8") as f:
             for idx_, row in enumerate(f):
                 data = json.loads(row)
                 yield idx_, {
                     "id": data["id"],
                     "infobox": data["infobox"],
                     "serialized_infobox": data["serialized_infobox"],
                     "summary": data["summary"]
 
                 }