# coding=utf-8
# Source: https://github.com/huggingface/datasets/blob/master/templates/new_dataset_script.py

"""ANTILLES Corpus"""

import os

import datasets
from tqdm import tqdm

logger = datasets.logging.get_logger(__name__)

_CITATION = """
@misc{universaldependencies,
    title = {UniversalDependencies/UD_French-GSD},
    url = {https://github.com/UniversalDependencies/UD_French-GSD},
    journal = {GitHub},
    author = {UniversalDependencies}
}

@inproceedings{mcdonald-etal-2013-universal,
    title = {{U}niversal {D}ependency Annotation for Multilingual Parsing},
    author = {McDonald, Ryan and Nivre, Joakim and Quirmbach-Brundage, Yvonne and Goldberg, Yoav and Das, Dipanjan and Ganchev, Kuzman and Hall, Keith and Petrov, Slav and Zhang, Hao and Tackstrom, Oscar and Bedini, Claudia and Bertomeu Castello, Nuria and Lee, Jungmee},
    booktitle = {Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (Volume 2: Short Papers)},
    month = aug,
    year = {2013},
    address = {Sofia, Bulgaria},
    publisher = {Association for Computational Linguistics},
    url = {https://aclanthology.org/P13-2017},
    pages = {92--97}
}

@techreport{LIA_TAGG,
    author = {Frédéric Béchet},
    title = {LIA_TAGG: a statistical POS tagger + syntactic bracketer},
    institution = {Aix-Marseille University & CNRS},
    year = {2001}
}
"""

_LICENSE = """
For the following languages: German, Spanish, French, Indonesian, Italian, Japanese, Korean and Brazilian Portuguese, we will distinguish between two portions of the data.

1. The underlying text for sentences that were annotated. This data Google asserts no ownership over and no copyright over. Some or all of these sentences may be copyrighted in some jurisdictions. Where copyrighted, Google collected these sentences under exceptions to copyright or implied license rights. GOOGLE MAKES THEM AVAILABLE TO YOU 'AS IS', WITHOUT ANY WARRANTY OF ANY KIND, WHETHER EXPRESS OR IMPLIED.

2. The annotations -- part-of-speech tags and dependency annotations. These are made available under a CC BY-SA 4.0 license. GOOGLE MAKES THEM AVAILABLE TO YOU 'AS IS', WITHOUT ANY WARRANTY OF ANY KIND, WHETHER EXPRESS OR IMPLIED. See the attached LICENSE file for the text of CC BY-NC-SA.

Portions of the German data were sampled from the CoNLL 2006 Tiger Treebank data. Hans Uszkoreit graciously gave permission to use the underlying sentences in this data as part of this release.
Any use of the data should reference the above plus:

    Universal Dependency Annotation for Multilingual Parsing
    Ryan McDonald, Joakim Nivre, Yvonne Quirmbach-Brundage, Yoav Goldberg, Dipanjan Das,
    Kuzman Ganchev, Keith Hall, Slav Petrov, Hao Zhang, Oscar Tackstrom,
    Claudia Bedini, Nuria Bertomeu Castello and Jungmee Lee
    Proceedings of ACL 2013
"""

_DESCRIPTION = """\
ANTILLES is a part-of-speech tagging corpus for French built on top of UD_French-GSD, \
with an extended tag set in which morphological information such as gender and number \
is folded into the part-of-speech labels (e.g. NMS for a masculine singular noun).\
"""

_URLS = {
    "ANTILLES": "https://huggingface.co/datasets/qanastek/ANTILLES/resolve/main/ANTILLES.zip",
}


class ANTILLES(datasets.GeneratorBasedBuilder):
    """ANTILLES dataset."""

    VERSION = datasets.Version("1.1.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="ANTILLES", version=VERSION, description="The ANTILLES corpora"),
    ]

    DEFAULT_CONFIG_NAME = "ANTILLES"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                'PART', 'PDEMMP', 'PREFS', 'PINDMP', 'DINTMS', 'NUM', 'PINTFS', 'NFP',
                                'PUNCT', 'PRELMS', 'NOUN', 'PPER3MS', 'AUX', 'COSUB', 'ADJ', 'VPPRE',
                                'COCO', 'ADJMP', 'X', 'NMS', 'PINDMS', 'DETFS', 'PPER2S', 'PREFP',
                                'PPER3MP', 'PRELMP', 'PINDFS', 'PRON', 'PREP', 'PPOBJMP', 'ADJFS',
                                'DET', 'ADJFP', 'PDEMFP', 'PREL', 'PPER3FS', 'VPPFS', 'PPER3FP',
                                'CHIF', 'NMP', 'SYM', 'NFS', 'VERB', 'PREF', 'VPPFP', 'PDEMMS',
                                'XFAMIL', 'PINDFP', 'VPPMP', 'YPFOR', 'ADV', 'PRELFS', 'DINTFS',
                                'DETMS', 'PPOBJFP', 'PPOBJMS', 'VPPMS', 'INTJ', 'PROPN', 'PDEMFS',
                                'PPER1S', 'PRELFP', 'MOTINC', 'ADJMS', 'PPOBJFS',
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/qanastek/ANTILLES",
            citation=_CITATION,
            license=_LICENSE,
        )

    def _split_generators(self, dl_manager):
        urls = _URLS[self.config.name]
        data_dir = dl_manager.download_and_extract(urls)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "train.conllu"),
                    "split": "train",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "dev.conllu"),
                    "split": "dev",
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": os.path.join(data_dir, "test.conllu"),
                    "split": "test",
                },
            ),
        ]

    def _generate_examples(self, filepath, split):
        logger.info("⏳ Generating examples from = %s", filepath)

        with open(filepath, encoding="utf-8") as f:
            guid = 0
            tokens = []
            pos_tags = []

            for line in tqdm(f):
                # Comment lines ("# sent_id = ...", "# text = ...") and blank
                # lines delimit sentences in the CoNLL-U format. Matching on
                # line.startswith("#") rather than '"#" in line' avoids
                # misclassifying token lines that merely contain a "#".
                if line.startswith("#") or line.strip() == "":
                    if tokens:
                        yield guid, {
                            "id": str(guid),
                            "tokens": tokens,
                            "pos_tags": pos_tags,
                        }
                        guid += 1
                        tokens = []
                        pos_tags = []
                else:
                    # CoNLL-U token line, tab-separated: ID, FORM, LEMMA, UPOS, ...
                    splits = line.split("\t")
                    tokens.append(splits[1])
                    # Unannotated tags ("_") fall back to the catch-all label "X".
                    pos_tags.append(splits[3].rstrip() if "_" not in splits[3] else "X")

            # Emit the final sentence when the file does not end with a blank
            # line; the guard also avoids yielding an empty trailing example.
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                }
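

# Illustrative usage sketch (an addition, not part of the upstream script):
# running this file directly would load the corpus through the Hub name
# "qanastek/ANTILLES" used in _URLS above and decode the integer pos_tags
# back into their label strings via the ClassLabel feature.
if __name__ == "__main__":
    from datasets import load_dataset

    # Downloads the archive and builds the train/validation/test splits.
    dataset = load_dataset("qanastek/ANTILLES")

    sample = dataset["train"][0]
    print(sample["tokens"])

    # pos_tags are stored as ClassLabel integers; int2str maps them back.
    label_feature = dataset["train"].features["pos_tags"].feature
    print([label_feature.int2str(tag) for tag in sample["pos_tags"]])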