Datasets: FactChecks.br
Languages: Portuguese
DOI: 10.57967/hf/1016
License: https://raw.githubusercontent.com/fake-news-UFG/FactChecks.br/main/LICENSE
File size: 12,953 Bytes
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""FactChecksbr dataset"""

import ast
import csv
import os
import textwrap

import datasets


_CITATION = """\
@misc{FactChecksbr,
author = {R. S. Gomes, Juliana},
title = {FactChecks.br},
url = {https://github.com/fake-news-UFG/FactChecks.br},
doi = { 10.57967/hf/1016 },
}
"""


_DESCRIPTION = """\
Collection of Portuguese Fact-Checking Benchmarks.
"""

_HOMEPAGE = "https://github.com/fake-news-UFG/FactChecks.br"

_LICENSE = "https://raw.githubusercontent.com/fake-news-UFG/FactChecks.br/main/LICENSE"

_URL = "https://github.com/fake-news-UFG/FactChecks.br/releases/download/v0.1/FactChecksbr.zip"


# from https://huggingface.co/datasets/glue/blob/main/glue.py
class GlueConfig(datasets.BuilderConfig):
    """BuilderConfig for FactChecksbr, adapted from GLUE's BuilderConfig."""

    def __init__(
        self,
        citation,
        **kwargs,
    ):
        super().__init__(version=datasets.Version("0.1.0", ""), **kwargs)
        self.citation = citation


class FactChecksbr(datasets.GeneratorBasedBuilder):
    """Collection of Portuguese Fact-Checking Benchmarks."""

    VERSION = datasets.Version("0.1.0")

    BUILDER_CONFIGS = [
        GlueConfig(
            name="fact_check_tweet_pt",
            description="",
            citation=textwrap.dedent(
                """\
            @misc{kazemi2022matching,
                title={Matching Tweets With Applicable Fact-Checks Across Languages}, 
                author={Ashkan Kazemi and Zehua Li and Verónica Pérez-Rosas and Scott A. Hale and Rada Mihalcea},
                year={2022},
                eprint={2202.07094},
                archivePrefix={arXiv},
                primaryClass={cs.CL}
            }
            """
            ),
        ),
        GlueConfig(
            name="central_de_fatos",
            description=textwrap.dedent(
                """\
            In recent times, the interest for research dissecting the dissemination
            and prevention of misinformation in the online environment has spiked dramatically.
            Given that scenario, a recurring obstacle is the unavailability of public datasets
            containing fact-checked instances."""
            ),
            citation=textwrap.dedent(
                """\
            @inproceedings{dsw,
            author = {João Couto and Breno Pimenta and Igor M. de Araújo and Samuel Assis and Julio C. S. Reis and Ana Paula da Silva and Jussara Almeida and Fabrício Benevenuto},
            title = {Central de Fatos: Um Repositório de Checagens de Fatos},
            booktitle = {Anais do III Dataset Showcase Workshop},
            location = {Rio de Janeiro},
            year = {2021},
            keywords = {},
            issn = {0000-0000},
            pages = {128--137},
            publisher = {SBC},
            address = {Porto Alegre, RS, Brasil},
            doi = {10.5753/dsw.2021.17421},
            url = {https://sol.sbc.org.br/index.php/dsw/article/view/17421}
            }
            """
            ),
        ),
        GlueConfig(
            name="FakeNewsSet",
            description="",
            citation=textwrap.dedent(
                """\
                @inproceedings{10.1145/3428658.3430965,
                author = {da Silva, Fl\'{a}vio Roberto Matias and Freire,
                Paulo M\'{a}rcio Souza and de Souza, Marcelo Pereira and de A. B. Plenamente,
                Gustavo and Goldschmidt, Ronaldo Ribeiro},
                title = {FakeNewsSetGen: A Process to Build Datasets That
                Support Comparison Among Fake News Detection Methods},
                year = {2020},
                isbn = {9781450381963},
                publisher = {Association for Computing Machinery},
                address = {New York, NY, USA},
                url = {https://doi.org/10.1145/3428658.3430965},
                doi = {10.1145/3428658.3430965},
                abstract = {Due to easy access and low cost, social media online
                news consumption has increased significantly for the last decade.
                Despite their benefits, some social media allow anyone to post news
                with intense spreading power, which amplifies an old problem: the dissemination of Fake News.
                In the face of this scenario, several machine learning-based methods
                to automatically detect Fake News (MLFN) have been proposed.
                All of them require datasets to train and evaluate their detection models.
                Although recent MLFN were designed to consider data regarding the news propagation
                on social media, most of the few available datasets do not contain this kind of data.
                Hence, comparing the performances amid those recent MLFN and the others is restricted
                to a very limited number of datasets. Moreover, all existing datasets with propagation
                data do not contain news in Portuguese, which impairs the evaluation of the MLFN in this language.
                Thus, this work proposes FakeNewsSetGen, a process that builds Fake News datasets that contain news
                propagation data and support comparison amid the state-of-the-art MLFN. FakeNewsSetGen's software
                engineering process was guided to include all kind of data required by the existing MLFN. In order
                to illustrate FakeNewsSetGen's viability and adequacy, a case study was carried out. It encompassed
                the implementation of a FakeNewsSetGen prototype and the application of this prototype to create a
                dataset called FakeNewsSet, with news in Portuguese. Five MLFN with different kind of data requirements
                (two of them demanding news propagation data) were applied to FakeNewsSet and compared, demonstrating the
                potential use of both the proposed process and the created dataset.},
                booktitle = {Proceedings of the Brazilian Symposium on Multimedia and the Web},
                pages = {241–248},
                numpages = {8},
                keywords = {Fake News detection, Dataset building process, social media},
                location = {S\~{a}o Lu\'{\i}s, Brazil},
                series = {WebMedia '20}
                }
                """
            ),
        ),
        GlueConfig(
            name="FakeRecogna",
            description=textwrap.dedent(
                """\
            FakeRecogna is a dataset comprised of real and fake news.
            The real news is not directly linked to fake news and vice-versa,
            which could lead to a biased classification.
            The news collection was performed by crawlers developed for mining
            pages of well-known and of great national importance agency news."""
            ),
            citation=textwrap.dedent(
                """\
            @inproceedings{10.1007/978-3-030-98305-5_6,
            author = {Garcia, Gabriel L. and Afonso, Luis C. S. and Papa, Jo\~{a}o P.},
            title = {FakeRecogna: A New Brazilian Corpus for Fake News Detection},
            year = {2022},
            isbn = {978-3-030-98304-8},
            publisher = {Springer-Verlag},
            address = {Berlin, Heidelberg},
            url = {https://doi.org/10.1007/978-3-030-98305-5_6},
            doi = {10.1007/978-3-030-98305-5_6},
            abstract = {Fake news has become a research topic of great importance in Natural 
            Language Processing due to its negative impact on our society. Although its pertinence,
            there are few datasets available in Brazilian Portuguese and mostly comprise few samples.
            Therefore, this paper proposes creating a new fake news dataset named FakeRecogna that
            contains a greater number of samples, more up-to-date news, and covering a few of the 
            most important categories. We perform a toy evaluation over the created dataset using traditional
            classifiers such as Naive Bayes, Optimum-Path Forest, and Support Vector Machines.
            A Convolutional Neural Network is also evaluated in the context of fake news detection
            in the proposed dataset.},
            booktitle = {Computational Processing of the Portuguese Language: 15th International Conference, PROPOR 2022, Fortaleza, Brazil, March 21–23, 2022, Proceedings},
            pages = {57–67},
            numpages = {11},
            keywords = {Fake news, Corpus, Portuguese},
            location = {Fortaleza, Brazil}
            }
            """
            ),
        ),
        GlueConfig(
            name="fakebr",
            description="Fake.Br Corpus is composed of aligned true and fake news written in Brazilian Portuguese.",
            citation=textwrap.dedent(
                """\
            @article{silva:20,
            title = "Towards automatically filtering fake news in Portuguese",
            journal = "Expert Systems with Applications",
            volume = "146",
            pages = "113199",
            year = "2020",
            issn = "0957-4174",
            doi = "https://doi.org/10.1016/j.eswa.2020.113199",
            url = "http://www.sciencedirect.com/science/article/pii/S0957417420300257",
            author = "Renato M. Silva and Roney L.S. Santos and Tiago A. Almeida and Thiago A.S. Pardo",
            }
            """
            ),
        ),
    ]

    DEFAULT_CONFIG_NAME = "fakebr"

    def _info(self):
        if self.config.name == "fakebr":
            features = datasets.Features(
                {
                    "claim_text": datasets.Value("string"),
                    "claim_author": datasets.Value("string"),
                    "claim_url": datasets.Value("string"),
                    "claim_date": datasets.Value("string"),
                    "category": datasets.Value("string"),
                    "is_fake": datasets.ClassLabel(num_classes=3, names=[1, 0, -1]),
                }
            )
        elif self.config.name in ["central_de_fatos", "FakeRecogna"]:
            features = datasets.Features(
                {
                    "review_id": datasets.Value("string"),
                    "review_text": datasets.Value("string"),
                    "review_author": datasets.Value("string"),
                    "review_url": datasets.Value("string"),
                    "review_domain": datasets.Value("string"),
                    "review_date": datasets.Value("string"),
                    "category": datasets.Value("string"),
                    "is_fake": datasets.ClassLabel(num_classes=3, names=[1, 0, -1]),
                }
            )
        elif self.config.name in ["fact_check_tweet_pt", "FakeNewsSet"]:
            features = datasets.Features(
                {
                    "review_id": datasets.Value("string"),
                    "review_url": datasets.Value("string"),
                    "review_domain": datasets.Value("string"),
                    "claim_ids": datasets.Sequence(
                        feature=datasets.Value(dtype="string", id=None)
                    ),
                    "is_fake": datasets.ClassLabel(num_classes=3, names=[1, 0, -1]),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        url = _URL
        data_dir = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(
                        data_dir, "data", f"{self.config.name}.tsv"
                    ),
                },
            ),
        ]
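
    # Expected layout of the extracted v0.1 archive, inferred from _URL and the
    # f"{self.config.name}.tsv" pattern above (not verified against the release):
    #
    #   data/
    #     fakebr.tsv
    #     central_de_fatos.tsv
    #     FakeRecogna.tsv
    #     fact_check_tweet_pt.tsv
    #     FakeNewsSet.tsv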

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t")

            # The first row of each TSV file holds the column names.
            names = next(reader)
            for idx, row in enumerate(reader):
                row = dict(zip(names, row))

                # "claim_ids" is serialized as a Python list literal in the TSV;
                # parse it safely with ast.literal_eval instead of eval().
                if "claim_ids" in row:
                    row["claim_ids"] = ast.literal_eval(row["claim_ids"])

                yield idx, row
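
For reference, a minimal usage sketch follows. It assumes the script above is saved locally as FactChecksbr.py (the actual Hub repository id may differ) and that the v0.1 release archive referenced in _URL is reachable; depending on the installed datasets version, loading community scripts may also require trust_remote_code=True.

# Minimal usage sketch; the local path "FactChecksbr.py" is an assumption.
from datasets import load_dataset

# Configuration names defined in BUILDER_CONFIGS: "fakebr" (default),
# "central_de_fatos", "FakeRecogna", "fact_check_tweet_pt", "FakeNewsSet".
ds = load_dataset("FactChecksbr.py", "fakebr", split="train")

print(ds.features)  # column schema, including the "is_fake" ClassLabel
print(ds[0])        # first fact-checked claim in the split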