"""HuggingFace `datasets` loading script for clustered comparable corpora
extracted from Wikipedia (Laskina et al., 2024).

Each configuration (``v5_*``) corresponds to one pre-built bilingual corpus
stored as ``dataset_{corpus_id}/wikicorpus_{corpus_id}.json``; every document
carries an id, a language tag, the text, and a (possibly multi-valued) list of
topic-cluster labels.
"""

import csv  # NOTE(review): unused in this chunk — kept in case another consumer relies on it; confirm before removing
import json
import os
from typing import Union

import datasets

_CITATION = """\
@inproceedings{laskina2024Creating,
    title = "Creating Clustered Comparable Corpora from Wikipedia with Different Fuzziness Levels and Language Representativity",
    author = "Anna Laskina, Eric Gaussier and Gaelle Calvary",
    year = "2024",
    abstract = "This paper is dedicated to the extraction of clustered comparable corpora from Wikipedia, that is comparable corpora with labelled information corresponding to the topics associated to each document. Despite the importance of such corpora for evaluating text clustering and classification methods in the context of comparable corpora, there is a notable absence of automatic algorithms capable of creating them with adjustable fuzziness levels and language representativity. The methodology we propose here offers control over the cluster distribution across languages, enables fine-tuning of fuzziness levels, and facilitates customization to accommodate specific subject areas. Moreover, we have developed a dedicated tool specifically designed for our purpose and present 18 bilingual clustered comparable corpora spanning English, French, German, Russian, and Swedish languages. The analysis of these corpora demonstrates the effectiveness and flexibility of the approach in constructing corpora with varying levels of fuzziness and language representativity. Our results, tool and corpora, pave the way to construct various gold standard collections for future research in clustering and classification in comparable corpora.",
    booktitle = "Proceedings of the 17rd Workshop on Building and Using Comparable Corpora",
}
"""

_DESCRIPTION = """\
A Clustered Comparable Corpora from Wikipedia.
"""

_HOMEPAGE = ""

_LICENSE = """\
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
"""

# Root URL/path of the data; configs resolve their files relative to it.
_URL = ""
# Per-corpus directory and file-name templates.
_BASE_DATA_URL = "dataset_{corpus_id}"
_CORPUS_NAME = "wikicorpus_{corpus_id}.json"
_INFO_NAME = "topic_information_{corpus_id}.json"

# The published corpus variants; one BuilderConfig is created for each.
_ID_LIST = ['v5_0', 'v5_1', 'v5_2', 'v5_3', 'v5_5', 'v5_7', 'v5_8', 'v5_9', 'v5_10']


class WikiCCCConfig(datasets.BuilderConfig):
    """BuilderConfig for Wikipedia CCC dataset."""

    def __init__(self, corpus_id: str, data_dir: str, **kwargs):
        """
        Args:
            corpus_id: `string`, the name of dataset
            data_dir: `string`, the path to the folder containing the dataset files
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.corpus_id = corpus_id
        # Directory holding this corpus variant, e.g. "<data_dir>/dataset_v5_0".
        self.base_data_dir = os.path.join(data_dir, _BASE_DATA_URL.format(corpus_id=corpus_id))


class NewDataset(datasets.GeneratorBasedBuilder):
    """Builder exposing each corpus variant as a single `train` split of
    documents with fields (id, language, text, label)."""

    BUILDER_CONFIG_CLASS = WikiCCCConfig
    BUILDER_CONFIGS = [
        WikiCCCConfig(name=corpus_id, corpus_id=corpus_id, data_dir=_URL)
        for corpus_id in _ID_LIST
    ]
    # Bug fix: the original default "v0_0" is not a member of _ID_LIST, so
    # loading without an explicit config name could never resolve. Default to
    # the first published variant instead.
    DEFAULT_CONFIG_NAME = "v5_0"

    def _info(self):
        """Declare the feature schema and dataset-level metadata."""
        features = datasets.Features(
            {
                "id": datasets.Value("int64"),
                "language": datasets.Value("string"),
                "text": datasets.Value("string"),
                # A document may belong to several topic clusters.
                "label": datasets.Sequence(datasets.Value("int64")),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,  # ("id", "label"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download the single corpus JSON file and expose it as the train split.

        Note: only _CORPUS_NAME is fetched here; the companion _INFO_NAME file
        is currently unused by this builder.
        """
        url = os.path.join(
            self.config.base_data_dir,
            _CORPUS_NAME.format(corpus_id=self.config.corpus_id),
        )
        downloaded_file = dl_manager.download(url)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_file},
            ),
        ]

    def _generate_examples(self, filepath: Union[str, os.PathLike]):
        """This function handles input defined in _split_generators to yield (key, example) tuples from the dataset.

        The file is a JSON array of document objects; the enumeration index is
        used as the example key.
        """
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for key, doc in enumerate(data):
            yield key, {
                "id": int(doc["id"]),
                "language": doc["language"],
                "text": doc["text"],
                "label": doc["label"],
            }