Datasets:

Multilinguality:
multilingual
Size Categories:
1,960<n<11,502
Language Creators:
found
Annotations Creators:
no-annotation
ArXiv:
Tags:
License:
krsnaman commited on
Commit
636a780
1 Parent(s): b7e4f86

add jsonl file

Browse files
Data/as_IndicWikiBio_v1.0.tar.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8b2df7acc7d9ffd514a343abf83a4922901c8264ab644b46bb06c80deea600c2
3
+ size 1583311
Data/bn_IndicWikiBio_v1.0.tar.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d379afe20a4535fe56f0475dde97bb831bc1269138ab97033b9976e6844af96
3
+ size 7319073
Data/hi_IndicWikiBio_v1.0.tar.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:096f794dfe46eab54d08f16e96c5a8f2fdb7f80eb2165b8de9e7190c86176ab7
3
+ size 5947309
Data/kn_IndicWikiBio_v1.0.tar.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:28d7c8d859eaed61e228fc621f80af0b95e1c383f5fe9fa5159481c945dd3d91
3
+ size 1434267
Data/ml_IndicWikiBio_v1.0.tar.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d1f69d04c0174ea26bad9f4ba0c6bedd78f76874a7183e472545aa3a96536df
3
+ size 7440718
Data/or_IndicWikiBio_v1.0.tar.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ff1163f43428ded46118e4aa4bb52acb98aa564f4d7bebcdf04e38a317e096c1
3
+ size 1500637
Data/pa_IndicWikiBio_v1.0.tar.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5f302cf71b29b05ff62aa282eb1b759634811262c7ab1fa3909b0d248f9a521
3
+ size 4858781
Data/ta_IndicWikiBio_v1.0.tar.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5279def9edfd309393532564198bd674343a3b7e1912e123b750062b16d689af
3
+ size 9941934
Data/te_IndicWikiBio_v1.0.tar.bz2 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04710708a2b55bd1da7f47be36d7914d467197c82d5e5474ffe47e8b4a5c1f9e
3
+ size 2568940
IndicWikiBio.py ADDED
@@ -0,0 +1,108 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json
import os

import datasets

# BibTeX entry for the IndicNLG Suite paper this dataset was released with.
_CITATION = """\
@inproceedings{Kumar2022IndicNLGSM,
title={IndicNLG Suite: Multilingual Datasets for Diverse NLG Tasks in Indic Languages},
author={Aman Kumar and Himani Shrotriya and Prachi Sahu and Raj Dabre and Ratish Puduppully and Anoop Kunchukuttan and Amogh Mishra and Mitesh M. Khapra and Pratyush Kumar},
year={2022},
url = "https://arxiv.org/abs/2203.05437"
}
"""

_DESCRIPTION = """\
This is the WikiBio dataset released as part of IndicNLG Suite. Each
example has four fields: id, infobox, serialized infobox and summary. We create this dataset in nine
languages including as, bn, hi, kn, ml, or, pa, ta, te. The total
size of the dataset is 57,426.
"""
_HOMEPAGE = "https://indicnlp.ai4bharat.org/indicnlg-suite"

_LICENSE = "Creative Commons Attribution-NonCommercial 4.0 International Public License"

# Per-language download URL template: formatted with (language code, version
# string such as "1.0").
#
# FIX: the original template pointed at the *IndicParaphrase* dataset
# ("ai4bharat/IndicParaphrase/resolve/main/data/{}_IndicParaphrase_v{}.tar.bz2"),
# an apparent copy-paste from a sibling loader. The archives this loader
# needs are the {lang}_IndicWikiBio_v1.0.tar.bz2 files committed under Data/
# in this repository, so the URL must target this dataset's own Data/ path.
_URL = "https://huggingface.co/datasets/ai4bharat/IndicWikiBio/resolve/main/Data/{}_IndicWikiBio_v{}.tar.bz2"


# ISO 639-1 codes of the nine languages covered by IndicWikiBio; one builder
# config is created per entry.
_LANGUAGES = [
    "as",
    "bn",
    "hi",
    "kn",
    "ml",
    "or",
    "pa",
    "ta",
    "te"
]
39
+
40
+
41
class WikiBio(datasets.GeneratorBasedBuilder):
    """Dataset loader for IndicWikiBio, with one config per language code."""

    VERSION = datasets.Version("1.0.0")

    # One builder configuration per supported language in _LANGUAGES.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="{}".format(lang), version=datasets.Version("1.0.0"))
        for lang in _LANGUAGES
    ]

    def _info(self):
        """Return dataset metadata: every example carries four string fields."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "infobox": datasets.Value("string"),
                    "serialized_infobox": datasets.Value("string"),
                    "summary": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            version=self.VERSION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract this config's archive and declare the three splits."""
        language = str(self.config.name)
        # version_str is e.g. "1.0.0"; dropping the last two characters yields
        # the "1.0" tag used in the archive file names.
        archive_url = _URL.format(language, self.VERSION.version_str[:-2])
        extracted_dir = dl_manager.download_and_extract(archive_url)

        # (split name, jsonl file-name prefix) pairs, in the original order.
        split_prefixes = [
            (datasets.Split.TRAIN, "train"),
            (datasets.Split.TEST, "test"),
            (datasets.Split.VALIDATION, "val"),
        ]
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={
                    "filepath": os.path.join(
                        extracted_dir, prefix + "_" + language + ".jsonl"
                    ),
                },
            )
            for split, prefix in split_prefixes
        ]

    def _generate_examples(self, filepath):
        """Yield (line index, example dict) pairs from a JSON-lines file."""
        with open(filepath, encoding="utf-8") as handle:
            for line_no, line in enumerate(handle):
                record = json.loads(line)
                yield line_no, {
                    "id": record["id"],
                    "infobox": record["infobox"],
                    "serialized_infobox": record["serialized_infobox"],
                    "summary": record["summary"],
                }
data/README.md DELETED
@@ -1,3 +0,0 @@
1
- ---
2
- license: cc-by-nc-4.0
3
- ---