holylovenia committed
Commit 9e06882
1 Parent(s): 0b7edee
Upload id_hoax_news.py with huggingface_hub
id_hoax_news.py ADDED (+125 -0)
@@ -0,0 +1,125 @@
from pathlib import Path
from typing import List

import datasets

from nusacrowd.utils import schemas
from nusacrowd.utils.configs import NusantaraConfig
from nusacrowd.utils.constants import DEFAULT_NUSANTARA_VIEW_NAME, DEFAULT_SOURCE_VIEW_NAME, Tasks

_DATASETNAME = "id_hoax_news"
_SOURCE_VIEW_NAME = DEFAULT_SOURCE_VIEW_NAME
_UNIFIED_VIEW_NAME = DEFAULT_NUSANTARA_VIEW_NAME

_LANGUAGES = ["ind"]  # We follow the ISO 639-3 language codes (https://iso639-3.sil.org/code_tables/639/data)
_LOCAL = False

_CITATION = """\
@INPROCEEDINGS{8265649,
  author={Pratiwi, Inggrid Yanuar Risca and Asmara, Rosa Andrie and Rahutomo, Faisal},
  booktitle={2017 11th International Conference on Information & Communication Technology and System (ICTS)},
  title={Study of hoax news detection using naïve bayes classifier in Indonesian language},
  year={2017},
  pages={73-78},
  doi={10.1109/ICTS.2017.8265649}}
"""

_DESCRIPTION = """\
This dataset accompanies a study of automatic hoax news detection and contains 250 hoax and valid news articles in the Indonesian language.
Each data sample was annotated by three reviewers, and the final label was obtained by majority vote of the three reviewers.
"""

_HOMEPAGE = "https://data.mendeley.com/datasets/p3hfgr5j3m/1"

_LICENSE = "Creative Commons Attribution 4.0 International"

_URLs = {
    "train": "https://data.mendeley.com/public-files/datasets/p3hfgr5j3m/files/38bfcff2-8a32-4920-9c26-4f63b5b2dad8/file_downloaded",
}

_SUPPORTED_TASKS = [Tasks.HOAX_NEWS_CLASSIFICATION]

_SOURCE_VERSION = "1.0.0"
_NUSANTARA_VERSION = "1.0.0"


class IdHoaxNews(datasets.GeneratorBasedBuilder):
    """Loader for 250 Indonesian news articles labeled as Valid or Hoax."""

    BUILDER_CONFIGS = [
        NusantaraConfig(
            name="id_hoax_news_source",
            version=datasets.Version(_SOURCE_VERSION),
            description="Hoax News source schema",
            schema="source",
            subset_id="id_hoax_news",
        ),
        NusantaraConfig(
            name="id_hoax_news_nusantara_text",
            version=datasets.Version(_NUSANTARA_VERSION),
            description="Hoax News Nusantara schema",
            schema="nusantara_text",
            subset_id="id_hoax_news",
        ),
    ]

    DEFAULT_CONFIG_NAME = "id_hoax_news_source"

    def _info(self):
        if self.config.schema == "source":
            features = datasets.Features(
                {
                    "index": datasets.Value("string"),
                    "news": datasets.Value("string"),
                    "label": datasets.Value("string"),
                }
            )
        elif self.config.schema == "nusantara_text":
            features = schemas.text_features(["Valid", "Hoax"])
        else:
            raise ValueError(f"Invalid schema: {self.config.schema}")

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager) -> List[datasets.SplitGenerator]:
        # The downloaded archive extracts to a directory containing a single CSV file.
        base_path = Path(dl_manager.download_and_extract(_URLs["train"]))
        data_files = {
            "train": base_path / "250 news with valid hoax label.csv",
        }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": data_files["train"]},
            ),
        ]

    def _generate_examples(self, filepath: Path):
        # The CSV is not well-formed (article bodies contain unescaped newlines),
        # so it is parsed by hand: lines are accumulated until one contains the
        # ";Valid" or ";Hoax" terminator, which closes out one article.
        with open(filepath, "r", encoding="ISO-8859-1") as news_file:
            lines = news_file.readlines()

        news = []
        labels = []

        curr_news = ""
        for line in lines[1:]:  # skip the header row
            line = line.replace("\n", "")
            if ";Valid" in line:
                curr_news += line.replace(";Valid", "")
                news.append(curr_news)
                labels.append("Valid")
                curr_news = ""
            elif ";Hoax" in line:
                curr_news += line.replace(";Hoax", "")
                news.append(curr_news)
                labels.append("Hoax")
                curr_news = ""
            else:
                curr_news += line + " "

        if self.config.schema == "source":
            for i in range(len(news)):
                yield i, {"index": str(i), "news": news[i], "label": labels[i]}
        elif self.config.schema == "nusantara_text":
            for i in range(len(news)):
                yield i, {"id": str(i), "text": news[i], "label": labels[i]}
        else:
            raise ValueError(f"Invalid config: {self.config.name}")