"""Hugging Face ``datasets`` loading script for the Polish Political Advertising dataset.

Loads pre-tokenized, BIO-tagged social-media posts from the Polish 2020
presidential election campaign. The train/validation/test splits are read
from Parquet files hosted on the Hugging Face Hub.
"""
from pathlib import Path

import datasets
import pandas as pd

logger = datasets.logging.get_logger(__name__)

# NOTE: ``{\\l}`` is the BibTeX escape for the Polish letter "ł". The double
# backslash keeps the emitted string content identical while avoiding
# Python's invalid-escape-sequence warning for ``\l`` in a non-raw string.
_CITATION = """\
@inproceedings{augustyniak-etal-2020-political,
title = "Political Advertising Dataset: the use case of the Polish 2020 Presidential Elections",
author = "Augustyniak, Lukasz and Rajda, Krzysztof and Kajdanowicz, Tomasz and Bernaczyk, Micha{\\l}",
booktitle = "Proceedings of the The Fourth Widening Natural Language Processing Workshop",
month = jul,
year = "2020",
address = "Seattle, USA",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.winlp-1.28",
pages = "110--114"
}
"""

_DESCRIPTION = "Polish Political Advertising Dataset"

_HOMEPAGE = "https://github.com/laugustyniak/misinformation"

# Parquet files for each split, served from the dataset repository on the Hub.
_URLS = {
    "train": "https://huggingface.co/datasets/laugustyniak/political-advertising-pl/resolve/main/train.parquet",
    "test": "https://huggingface.co/datasets/laugustyniak/political-advertising-pl/resolve/main/test.parquet",
    "validation": "https://huggingface.co/datasets/laugustyniak/political-advertising-pl/resolve/main/dev.parquet",
}

# NOTE(review): unused in this script; kept for backward compatibility in
# case external code imports it from this module.
DATA_PATH = Path(".")


class PoliticalAdvertisingConfig(datasets.BuilderConfig):
    """BuilderConfig for the Polish Political Advertising dataset.

    NOTE(review): currently unused — ``BUILDER_CONFIGS`` below instantiates
    the plain ``datasets.BuilderConfig``. Kept so existing imports keep working.
    """

    def __init__(self, **kwargs):
        super(PoliticalAdvertisingConfig, self).__init__(**kwargs)


class PoliticalAdvertisingDataset(datasets.GeneratorBasedBuilder):
    """Token-classification (NER-style BIO tagging) dataset builder."""

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="political-advertising-pl", version=VERSION)
    ]

    def _info(self):
        """Return dataset metadata: features schema, description, citation.

        Each example is an ``id``, a sequence of ``tokens``, and a parallel
        sequence of BIO ``tags`` drawn from the fixed label inventory below.
        """
        # NOTE: "HEALHCARE" and "ENVIROMENT" are misspelled in the published
        # dataset's label set — do NOT correct them here, or the ClassLabel
        # names will no longer match the tags stored in the Parquet files.
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "tokens": datasets.Sequence(datasets.Value("string")),
                "tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-DEFENSE_AND_SECURITY",
                            "I-DEFENSE_AND_SECURITY",
                            "B-EDUCATION",
                            "I-EDUCATION",
                            "B-FOREIGN_POLICY",
                            "I-FOREIGN_POLICY",
                            "B-HEALHCARE",
                            "I-HEALHCARE",
                            "B-IMMIGRATION",
                            "I-IMMIGRATION",
                            "B-INFRASTRUCTURE_AND_ENVIROMENT",
                            "I-INFRASTRUCTURE_AND_ENVIROMENT",
                            "B-POLITICAL_AND_LEGAL_SYSTEM",
                            "I-POLITICAL_AND_LEGAL_SYSTEM",
                            "B-SOCIETY",
                            "I-SOCIETY",
                            "B-WELFARE",
                            "I-WELFARE",
                        ]
                    )
                ),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download the three Parquet files and map them to dataset splits."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["validation"]},
            ),
        ]

    def _generate_examples(self, filepath: str):
        """Yield ``(key, example)`` pairs from one split's Parquet file.

        The DataFrame's row index doubles as both the example key and the
        ``id`` field; ``tokens`` and ``tags`` are taken from the parallel
        columns of the same name.
        """
        logger.info("⏳ Generating examples from = %s", filepath)
        df = pd.read_parquet(filepath)
        for row_id, row in df.iterrows():
            yield row_id, {
                "id": str(row_id),
                "tokens": row.tokens,
                "tags": row.tags,
            }