"""FactChecksbr dataset"""

import ast
import csv
import os
import textwrap

import datasets


_CITATION = """\
@misc{FactChecksbr,
    author = {R. S. Gomes, Juliana},
    title = {FactChecks.br},
    url = {https://github.com/fake-news-UFG/FactChecks.br},
    doi = {10.57967/hf/1016},
}
"""

_DESCRIPTION = """\
Collection of Portuguese Fact-Checking Benchmarks.
"""

_HOMEPAGE = "https://github.com/fake-news-UFG/FactChecks.br"

_LICENSE = "https://raw.githubusercontent.com/fake-news-UFG/FactChecks.br/main/LICENSE"

_URL = "https://github.com/fake-news-UFG/FactChecks.br/releases/download/v0.1/FactChecksbr.zip"


class GlueConfig(datasets.BuilderConfig):
    """BuilderConfig for FactChecksbr."""

    def __init__(
        self,
        citation,
        **kwargs,
    ):
        super(GlueConfig, self).__init__(
            version=datasets.Version("0.1.0", ""), **kwargs
        )
        self.citation = citation


class FactChecksbr(datasets.GeneratorBasedBuilder):
    """Collection of Portuguese Fact-Checking Benchmarks."""

    VERSION = datasets.Version("0.1.0")

    BUILDER_CONFIGS = [
        GlueConfig(
            name="fact_check_tweet_pt",
            description="",
            citation=textwrap.dedent(
                """\
                @misc{kazemi2022matching,
                    title={Matching Tweets With Applicable Fact-Checks Across Languages},
                    author={Ashkan Kazemi and Zehua Li and Verónica Pérez-Rosas and Scott A. Hale and Rada Mihalcea},
                    year={2022},
                    eprint={2202.07094},
                    archivePrefix={arXiv},
                    primaryClass={cs.CL}
                }
                """
            ),
        ),
        GlueConfig(
            name="central_de_fatos",
            description=textwrap.dedent(
                """\
                In recent times, interest in research dissecting the dissemination
                and prevention of misinformation in the online environment has spiked dramatically.
                Given that scenario, a recurring obstacle is the unavailability of public datasets
                containing fact-checked instances."""
            ),
            citation=textwrap.dedent(
                """\
                @inproceedings{dsw,
                    author = {João Couto and Breno Pimenta and Igor M. de Araújo and Samuel Assis and Julio C. S. Reis and Ana Paula da Silva and Jussara Almeida and Fabrício Benevenuto},
                    title = {Central de Fatos: Um Repositório de Checagens de Fatos},
                    booktitle = {Anais do III Dataset Showcase Workshop},
                    location = {Rio de Janeiro},
                    year = {2021},
                    keywords = {},
                    issn = {0000-0000},
                    pages = {128--137},
                    publisher = {SBC},
                    address = {Porto Alegre, RS, Brasil},
                    doi = {10.5753/dsw.2021.17421},
                    url = {https://sol.sbc.org.br/index.php/dsw/article/view/17421}
                }
                """
            ),
        ),
        GlueConfig(
            name="FakeNewsSet",
            description="",
            citation=textwrap.dedent(
                """\
                @inproceedings{10.1145/3428658.3430965,
                    author = {da Silva, Fl\'{a}vio Roberto Matias and Freire, Paulo M\'{a}rcio Souza and de Souza, Marcelo Pereira and de A. B. Plenamente, Gustavo and Goldschmidt, Ronaldo Ribeiro},
                    title = {FakeNewsSetGen: A Process to Build Datasets That Support Comparison Among Fake News Detection Methods},
                    year = {2020},
                    isbn = {9781450381963},
                    publisher = {Association for Computing Machinery},
                    address = {New York, NY, USA},
                    url = {https://doi.org/10.1145/3428658.3430965},
                    doi = {10.1145/3428658.3430965},
                    abstract = {Due to easy access and low cost, social media online
                    news consumption has increased significantly for the last decade.
                    Despite their benefits, some social media allow anyone to post news
                    with intense spreading power, which amplifies an old problem: the dissemination of Fake News.
                    In the face of this scenario, several machine learning-based methods
                    to automatically detect Fake News (MLFN) have been proposed.
                    All of them require datasets to train and evaluate their detection models.
                    Although recent MLFN were designed to consider data regarding the news propagation
                    on social media, most of the few available datasets do not contain this kind of data.
                    Hence, comparing the performances amid those recent MLFN and the others is restricted
                    to a very limited number of datasets. Moreover, all existing datasets with propagation
                    data do not contain news in Portuguese, which impairs the evaluation of the MLFN in this language.
                    Thus, this work proposes FakeNewsSetGen, a process that builds Fake News datasets that contain news
                    propagation data and support comparison amid the state-of-the-art MLFN. FakeNewsSetGen's software
                    engineering process was guided to include all kind of data required by the existing MLFN. In order
                    to illustrate FakeNewsSetGen's viability and adequacy, a case study was carried out. It encompassed
                    the implementation of a FakeNewsSetGen prototype and the application of this prototype to create a
                    dataset called FakeNewsSet, with news in Portuguese. Five MLFN with different kind of data requirements
                    (two of them demanding news propagation data) were applied to FakeNewsSet and compared, demonstrating the
                    potential use of both the proposed process and the created dataset.},
                    booktitle = {Proceedings of the Brazilian Symposium on Multimedia and the Web},
                    pages = {241–248},
                    numpages = {8},
                    keywords = {Fake News detection, Dataset building process, social media},
                    location = {S\~{a}o Lu\'{\i}s, Brazil},
                    series = {WebMedia '20}
                }
                """
            ),
        ),
        GlueConfig(
            name="FakeRecogna",
            description=textwrap.dedent(
                """\
                FakeRecogna is a dataset comprised of real and fake news.
                The real news is not directly linked to fake news and vice-versa,
                which could lead to a biased classification.
                The news collection was performed by crawlers developed for mining
                the pages of well-known news agencies of great national importance."""
            ),
            citation=textwrap.dedent(
                """\
                @inproceedings{10.1007/978-3-030-98305-5_6,
                    author = {Garcia, Gabriel L. and Afonso, Luis C. S. and Papa, Jo\~{a}o P.},
                    title = {FakeRecogna: A New Brazilian Corpus for Fake News Detection},
                    year = {2022},
                    isbn = {978-3-030-98304-8},
                    publisher = {Springer-Verlag},
                    address = {Berlin, Heidelberg},
                    url = {https://doi.org/10.1007/978-3-030-98305-5_6},
                    doi = {10.1007/978-3-030-98305-5_6},
                    abstract = {Fake news has become a research topic of great importance in Natural
                    Language Processing due to its negative impact on our society. Although its pertinence,
                    there are few datasets available in Brazilian Portuguese and mostly comprise few samples.
                    Therefore, this paper proposes creating a new fake news dataset named FakeRecogna that
                    contains a greater number of samples, more up-to-date news, and covering a few of the
                    most important categories. We perform a toy evaluation over the created dataset using traditional
                    classifiers such as Naive Bayes, Optimum-Path Forest, and Support Vector Machines.
                    A Convolutional Neural Network is also evaluated in the context of fake news detection
                    in the proposed dataset.},
                    booktitle = {Computational Processing of the Portuguese Language: 15th International Conference, PROPOR 2022, Fortaleza, Brazil, March 21–23, 2022, Proceedings},
                    pages = {57–67},
                    numpages = {11},
                    keywords = {Fake news, Corpus, Portuguese},
                    location = {Fortaleza, Brazil}
                }
                """
            ),
        ),
        GlueConfig(
            name="fakebr",
            description="Fake.Br Corpus is composed of aligned true and fake news written in Brazilian Portuguese.",
            citation=textwrap.dedent(
                """\
                @article{silva:20,
                    title = "Towards automatically filtering fake news in Portuguese",
                    journal = "Expert Systems with Applications",
                    volume = "146",
                    pages = "113199",
                    year = "2020",
                    issn = "0957-4174",
                    doi = "https://doi.org/10.1016/j.eswa.2020.113199",
                    url = "http://www.sciencedirect.com/science/article/pii/S0957417420300257",
                    author = "Renato M. Silva and Roney L.S. Santos and Tiago A. Almeida and Thiago A.S. Pardo",
                }
                """
            ),
        ),
    ]

    DEFAULT_CONFIG_NAME = "fakebr"

    def _info(self):
        # ClassLabel names are given as strings so they match the raw label
        # values ("1", "0", "-1") read from the TSV files.
        if self.config.name == "fakebr":
            features = datasets.Features(
                {
                    "claim_text": datasets.Value("string"),
                    "claim_author": datasets.Value("string"),
                    "claim_url": datasets.Value("string"),
                    "claim_date": datasets.Value("string"),
                    "category": datasets.Value("string"),
                    "is_fake": datasets.ClassLabel(num_classes=3, names=["1", "0", "-1"]),
                }
            )
        elif self.config.name in ["central_de_fatos", "FakeRecogna"]:
            features = datasets.Features(
                {
                    "review_id": datasets.Value("string"),
                    "review_text": datasets.Value("string"),
                    "review_author": datasets.Value("string"),
                    "review_url": datasets.Value("string"),
                    "review_domain": datasets.Value("string"),
                    "review_date": datasets.Value("string"),
                    "category": datasets.Value("string"),
                    "is_fake": datasets.ClassLabel(num_classes=3, names=["1", "0", "-1"]),
                }
            )
        elif self.config.name in ["fact_check_tweet_pt", "FakeNewsSet"]:
            features = datasets.Features(
                {
                    "review_id": datasets.Value("string"),
                    "review_url": datasets.Value("string"),
                    "review_domain": datasets.Value("string"),
                    "claim_ids": datasets.Sequence(
                        feature=datasets.Value(dtype="string", id=None)
                    ),
                    "is_fake": datasets.ClassLabel(num_classes=3, names=["1", "0", "-1"]),
                }
            )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Each benchmark ships as a single TSV file inside the release archive
        # (data/<config_name>.tsv) and is exposed here as one "train" split.
        url = _URL
        data_dir = dl_manager.download_and_extract(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(
                        data_dir, "data", f"{self.config.name}.tsv"
                    ),
                },
            ),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, encoding="utf-8") as f:
            reader = csv.reader(f, delimiter="\t")

            # The first row holds the column names; every following row is one example.
            names = next(reader)
            for idx, row in enumerate(reader):
                row = dict(zip(names, row))

                # "claim_ids" is stored as a stringified Python list, so parse it
                # safely with ast.literal_eval instead of calling eval() on file contents.
                if "claim_ids" in row:
                    row["claim_ids"] = ast.literal_eval(row["claim_ids"])

                yield idx, row
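

# A minimal usage sketch (not part of the loader): it assumes this script is saved
# locally as "factchecksbr.py" (the filename is illustrative) and that the installed
# `datasets` version still supports script-based datasets; newer releases may also
# require passing trust_remote_code=True to load_dataset.
if __name__ == "__main__":
    dataset = datasets.load_dataset("factchecksbr.py", "fakebr", split="train")
    print(dataset)
    print(dataset[0])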