jaspercatapang committed
Commit c70b94b
1 Parent(s): efb7b4c

Upload ph_en_text_detoxed.py

Files changed (1)
  1. ph_en_text_detoxed.py  +79 -0

ph_en_text_detoxed.py ADDED
@@ -0,0 +1,79 @@
+ # coding=utf-8
+ # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Philippine English Text Corpus."""
+ # template adapted from the agNews, phNews, and tweet_eval loading scripts
+
+ import os
+ import datasets
+ import csv
+
+
+ _DESCRIPTION = """\
+ PhEnText Detoxed is a large-scale, multi-domain corpus of lexical data written in Philippine English.
+ The news articles, religious articles, and court decisions collated by the original researchers were filtered for toxicity, and special characters were further preprocessed.
+ """
+
+ _CITATION = """\
+
+ }
+ """
+
+ _TRAIN_DOWNLOAD_URL = "https://huggingface.co/datasets/NLPinas/ph-en-text/blob/main/ph_en_text_detoxed_train.csv"
+ _TEST_DOWNLOAD_URL = "https://huggingface.co/datasets/NLPinas/ph-en-text/blob/main/ph_en_text_detoxed_test.csv"
+
+
+ class PhEnText(datasets.GeneratorBasedBuilder):
+     """Philippine English Text (PhEnText) Corpus."""
+
+     def _info(self):
+         features = datasets.Features(
+             {
+                 "id": datasets.Value("int32"),
+                 "text": datasets.Value("string"),
+             }
+         )
+
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=features,
+             homepage=" ",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         train_path = dl_manager.download_and_extract(_TRAIN_DOWNLOAD_URL)
+         test_path = dl_manager.download_and_extract(_TEST_DOWNLOAD_URL)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_path}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_path}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields PhEnText examples as (key, example) pairs."""
+         with open(filepath, encoding="utf-8") as csv_file:
+             csv_reader = csv.reader(
+                 csv_file, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
+             )
+
+             # Each row is expected to be an (id, text) pair; the id is cast to match the int32 feature.
+             for id_, row in enumerate(csv_reader):
+                 row_id, text = row
+                 if text.strip():
+                     yield id_, {"id": int(row_id), "text": text}
+                 else:
+                     yield id_, {"id": int(row_id), "text": ""}
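
For context, a minimal sketch of how a loading script like this is typically consumed with the datasets library; the local script path below is an illustrative assumption (a Hub dataset id would also work), and newer datasets releases may additionally require trust_remote_code=True for script-based datasets:

    from datasets import load_dataset

    # Builds the train and test splits defined in _split_generators above.
    dataset = load_dataset("ph_en_text_detoxed.py")

    # Each example follows the features declared in _info: an int "id" and a string "text".
    print(dataset["train"][0])
    print(dataset["test"].num_rows)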