# coding=utf-8
# Lint as: python3
"""Loading script for CoNLL-formatted token-classification datasets.

Each data file is expected to contain one token per line, with columns
separated by ``separator``; sentences are delimited by blank lines or
``-DOCSTART-`` markers (conll2003 convention).
"""
import os
import itertools

import datasets

logger = datasets.logging.get_logger(__name__)


class ConllConfig(datasets.BuilderConfig):
    """BuilderConfig for Conll"""

    def __init__(
        self,
        data_files=None,
        separator: str = " ",  # conll2003 tokens are space separated
        tag_index: int = -1,  # conll2003 ner tags are in the last item of each row
        **kwargs
    ):
        """BuilderConfig for Conll.

        Args:
            data_files: path(s) to the raw CoNLL files, either a single
                path / list of paths or a dict mapping split names to paths.
            separator: column separator within a token line.
            tag_index: column index (into the separator-split row) that
                holds the tag for the token; -1 means the last column.
            **kwargs: keyword arguments forwarded to super.
        """
        super(ConllConfig, self).__init__(**kwargs)
        self.data_files = data_files
        self.separator = separator
        self.tag_index = tag_index


class Conll(datasets.GeneratorBasedBuilder):
    """Conll dataset."""

    BUILDER_CONFIGS = [
        ConllConfig(name="conll", version=datasets.Version("1.0.0"), description="Conll dataset"),
    ]

    DEFAULT_CONFIG_NAME = "conll"

    def _info(self):
        # Tags are kept as plain strings (no ClassLabel) so the loader works
        # for any tag inventory without declaring the label set up front.
        return datasets.DatasetInfo(
            description="Conll dataset",
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(datasets.Value("string")),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        # A bare path / list of paths means "no split structure": everything
        # goes into a single TRAIN split.
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # iter_files expands directories (e.g. extracted archives) into
            # the individual files they contain.
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        # Otherwise data_files is a dict mapping split name -> path(s).
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _generate_examples(self, files):
        """Yields (key, example) pairs, one example per sentence.

        Args:
            files: list of iterables of file paths (as produced by
                ``_split_generators``).
        """
        for file_idx, filepath in enumerate(itertools.chain.from_iterable(files)):
            logger.info("⏳ Generating examples from = %s", filepath)
            with open(filepath, encoding="utf-8") as f:
                guid = 0
                tokens = []
                ner_tags = []
                for line in f:
                    # Sentence boundary: -DOCSTART- marker or blank line.
                    # NOTE(review): `line == ""` can only match if iteration
                    # ever yields an empty string (it normally does not —
                    # lines keep their trailing "\n"); kept for safety.
                    if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                        if tokens:
                            yield guid, {
                                "id": str(guid),
                                "tokens": tokens,
                                "ner_tags": ner_tags,
                            }
                            guid += 1
                            tokens = []
                            ner_tags = []
                    else:
                        splits = line.split(self.config.separator)
                        tokens.append(splits[0])
                        # strip() removes the trailing newline when the tag
                        # is the last column of the row.
                        ner_tags.append(splits[self.config.tag_index].strip())
                # last example: file may end without a trailing blank line.
                if tokens:
                    yield guid, {
                        "id": str(guid),
                        "tokens": tokens,
                        "ner_tags": ner_tags,
                    }