Modalities: Text
Formats: parquet
ArXiv: arxiv.org/abs/2303.03750
Libraries: Datasets, pandas
License: CC BY 4.0
Commit b689fe8 (1 parent: 6a6fe07), committed by lastrucci01

removed dataset loading script

Files changed (1):
  1. vukuzenzele-monolingual.py +0 -107
vukuzenzele-monolingual.py DELETED
@@ -1,107 +0,0 @@
-
-"""TODO: Add a description here."""
-
-
-import csv
-import json
-import os
-
-import datasets
-
-
-# TODO: Add BibTeX citation
-# Find for instance the citation on arxiv or on the dataset repo/website
-_CITATION = """\
-@dataset{marivate_vukosi_2023_7598540, author = {Marivate, Vukosi and Njini, Daniel and Madodonga, Andani and Lastrucci, Richard and Dzingirai, Isheanesu Rajab, Jenalea}, title = {The Vuk'uzenzele South African Multilingual Corpus}, month = feb, year = 2023, publisher = {Zenodo}, doi = {10.5281/zenodo.7598539}, url = {https://doi.org/10.5281/zenodo.7598539} }
-"""
-
-_DESCRIPTION = """\
-The dataset contains editions from the South African government magazine Vuk'uzenzele. Data was scraped from PDFs that have been placed in the data/raw folder. The PDFS were obtained from the Vuk'uzenzele website.
-"""
-
-# TODO: Add a link to an official homepage for the dataset here
-_HOMEPAGE = "https://arxiv.org/abs/2303.03750"
-
-# TODO: Add the licence for the dataset here if you can find it
-_LICENSE = "CC 4.0 BY"
-
-_URL = "https://raw.githubusercontent.com/dsfsi/vukuzenzele-nlp/master/data/huggingface/"
-_DATAFILE = "data.jsonl"
-
-class VukuzenzeleMonolingualConfig(datasets.BuilderConfig):
-    """BuilderConfig for VukuzenzeleMonolingual"""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for Masakhaner.
-        Args:
-            **kwargs: keyword arguments forwarded to super.
-        """
-        super(VukuzenzeleMonolingualConfig, self).__init__(**kwargs)
-
-
-# TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
-class VukuzenzeleMonolingual(datasets.GeneratorBasedBuilder):
-    """TODO: Short description of my dataset."""
-
-    VERSION = datasets.Version("1.0.0")
-
-    BUILDER_CONFIGS = [
-        datasets.BuilderConfig(name="afr", version=VERSION, description="Vukuzenzele Afrikaans Dataset"),
-        datasets.BuilderConfig(name="eng", version=VERSION, description="Vukuzenzele English Dataset"),
-        datasets.BuilderConfig(name="nbl", version=VERSION, description="Vukuzenzele Ndebele Dataset"),
-        datasets.BuilderConfig(name="nso", version=VERSION, description="Vukuzenzele Sepedi Dataset"),
-        datasets.BuilderConfig(name="sot", version=VERSION, description="Vukuzenzele Sesotho Dataset"),
-        datasets.BuilderConfig(name="ssw", version=VERSION, description="Vukuzenzele siSwati Dataset"),
-        datasets.BuilderConfig(name="tsn", version=VERSION, description="Vukuzenzele Setswana Dataset"),
-        datasets.BuilderConfig(name="tso", version=VERSION, description="Vukuzenzele Xitsonga Dataset"),
-        datasets.BuilderConfig(name="ven", version=VERSION, description="Vukuzenzele Tshivenda Dataset"),
-        datasets.BuilderConfig(name="xho", version=VERSION, description="Vukuzenzele isiXhosa Dataset"),
-        datasets.BuilderConfig(name="zul", version=VERSION, description="Vukuzenzele isiZulu Dataset"),
-    ]
-
-    def _info(self):
-        features = datasets.Features(
-            {
-                "title": datasets.Value("string"),
-                "text": datasets.Value("string"),
-                "language_code": datasets.Value("string"),
-                "edition": datasets.Value("string")
-            }
-        )
-        return datasets.DatasetInfo(
-            description=_DESCRIPTION,
-            features=features,
-            homepage=_HOMEPAGE,
-            license=_LICENSE,
-            # Citation for the dataset
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-
-        urls = {
-            "train": f"{_URL}{self.config.name}/{_DATAFILE}"
-        }
-        data_dir = dl_manager.download_and_extract(urls)
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "filepath": data_dir["train"],
-                    "split": "train",
-                },
-            ),
-        ]
-
-    def _generate_examples(self, filepath, split):
-        with open(filepath, encoding="utf-8") as f:
-            for key, row in enumerate(f):
-                data = json.loads(row)
-                if 'title' not in data.keys(): continue
-                yield key, {
-                    "title": data["title"],
-                    "text": data["text"],
-                    "edition": data["edition"],
-                    "language_code": data["language_code"],
-                }
-