khalidalt committed on
Commit
1092b34
1 Parent(s): e6bbe34

Create ultimate_arabic_news.py

Files changed (1)
  1. ultimate_arabic_news.py +106 -0
ultimate_arabic_news.py ADDED

import csv
import os
import textwrap

import datasets

_DESCRIPTION = "TODO"

_HOMEPAGE = "TODO"

_LICENSE = "TODO"

_CITATION = "TODO"

# Keys match the BUILDER_CONFIGS names so _split_generators can index the
# download map by self.config.name. Note: these "blob" URLs point at the
# Hub's file-viewer page; the raw CSV is normally served from the
# corresponding "resolve" endpoint.
_URL = {
    "UltimateArabic": "https://huggingface.co/datasets/khalidalt/ultimate_arabic_news/blob/main/UltimateArabic.csv",
    "UltimateArabicPrePros": "https://huggingface.co/datasets/khalidalt/ultimate_arabic_news/blob/main/UltimateArabicPrePros.csv",
}


class UAN_Config(datasets.BuilderConfig):
    """BuilderConfig for Ultimate Arabic News."""

    def __init__(self, **kwargs):
        """
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(UAN_Config, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)


class Ultimate_Arabic_News(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIGS = [
        UAN_Config(
            name="UltimateArabic",
            description=textwrap.dedent(
                """\
                UltimateArabic: more than 193,000 original Arabic news texts with no
                pre-processing. The texts contain words, numbers, and symbols that can be
                removed by pre-processing to increase accuracy when the dataset is used in
                Arabic natural language processing tasks such as text classification."""
            ),
        ),
        UAN_Config(
            name="UltimateArabicPrePros",
            description=textwrap.dedent(
                """\
                UltimateArabicPrePros: the same data after pre-processing, reduced to about
                188,000 text documents. Stop words, non-Arabic words, symbols, and numbers
                have been removed, so the file can be used directly in Arabic natural
                language processing tasks such as text classification."""
            ),
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            # This is the description that will appear on the datasets page.
            description=_DESCRIPTION,
            # Both configurations expose the article text and a category label.
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            ),
            # There is no canonical (input, target) pair to use when
            # as_supervised=True in builder.as_dataset.
            supervised_keys=None,
            # Homepage of the dataset for documentation.
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        # dl_manager is a datasets.download.DownloadManager that can be used to
        # download and extract URLs. Each configuration corresponds to a single
        # CSV file, so each one exposes only a TRAIN split.
        downloaded = dl_manager.download_and_extract(_URL)
        if self.config.name == "UltimateArabic":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples.
                    gen_kwargs={"filepath": downloaded["UltimateArabic"]},
                ),
            ]
        elif self.config.name == "UltimateArabicPrePros":
            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    # These kwargs will be passed to _generate_examples.
                    gen_kwargs={"filepath": downloaded["UltimateArabicPrePros"]},
                ),
            ]
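
    # Hypothetical sketch: _generate_examples is not part of this commit, but a
    # GeneratorBasedBuilder requires it. This version assumes the CSVs have
    # header columns named "text" and "label"; the column names are not shown
    # here, so adjust them to match the actual files.
    def _generate_examples(self, filepath):
        """Yields (key, example) pairs read from one downloaded CSV file."""
        with open(filepath, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for idx, row in enumerate(reader):
                yield idx, {"text": row["text"], "label": row["label"]}


# Usage sketch, assuming the script is saved locally as ultimate_arabic_news.py:
#
#   from datasets import load_dataset
#   ds = load_dataset("ultimate_arabic_news.py", "UltimateArabic")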