# coding=utf-8
# Copyright 2022 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""UnSilenceVOC dataset."""

import re

import datasets
from datasets import ClassLabel, Sequence, Value

_CITATION = """\
TODO
"""

_DESCRIPTION = """\
TODO
"""

# Label inventories for the token-level annotation layers.
NE_MAIN_LABELS = [
    "B-Organization",
    "B-Organization,B-Place",
    "B-Organization,I-Person",
    "B-Organization,I-Place",
    "B-Person",
    "B-Person,B-Place",
    "B-Person,I-Place",
    "B-Place",
    "I-Organization",
    "I-Organization,B-Place",
    "I-Organization,I-Person",
    "I-Organization,I-Person,B-Place",
    "I-Organization,I-Person,I-Place",
    "I-Organization,I-Place",
    "I-Person",
    "I-Person,B-Place",
    "I-Person,I-Place",
    "I-Place",
    "O",
]

NE_PER_NAME = ["I-ProperName", "O", "B-ProperName", ""]

NE_PER_GENDER = [
    "B-Group",
    "B-Man",
    "B-Man,B-Unspecified",
    "B-Man,I-Woman",
    "B-Unspecified",
    "B-Unspecified,I-Woman",
    "B-Woman",
    "I-Group",
    "I-Man",
    "I-Man,I-Unspecified",
    "I-Man,I-Woman",
    "I-Unspecified",
    "I-Unspecified,I-Woman",
    "I-Woman",
    "NE-PER-GENDER",
    "O",
]

NE_PER_LEGAL_STATUS = [
    "B-Enslaved",
    "B-Freed",
    "B-Unspecified",
    "I-Enslaved",
    "I-Freed",
    "I-Unspecified",
    "NE-PER-LEGAL-STATUS",
    "O",
]

NE_PER_ROLE = [
    "B-Acting_Notary",
    "B-Beneficiary",
    "B-Notary",
    "B-Other",
    "B-Testator",
    "B-Testator_Beneficiary",
    "B-Witness",
    "I-Acting_Notary",
    "I-Beneficiary",
    "I-Beneficiary,B-Other",
    "I-Beneficiary,I-Other",
    "I-Notary",
    "I-Other",
    "I-Testator",
    "I-Testator_Beneficiary",
    "I-Witness",
    "NE-PER-ROLE",
    "O",
]

NE_ORG_BENEFICIARY = [
    "B-No",
    "B-Yes",
    "I-No",
    "I-Yes",
    "NE-ORG-BENEFICIARY",
    "O",
]

_BASE_URL = (
    "https://raw.githubusercontent.com/budh333/UnSilence_VOC/main/processed_data"
)

_URLS = {
    "train": f"{_BASE_URL}/train-nl.tsv",
    "test": f"{_BASE_URL}/test-nl.tsv",
    "dev": f"{_BASE_URL}/dev-nl.tsv",
}


class UnSilenceVOC(datasets.GeneratorBasedBuilder):
    """UnSilence VOC dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": Sequence(Value("string")),
                    "NE-MAIN": Sequence(ClassLabel(names=NE_MAIN_LABELS)),
                    "NE-PER-NAME": Sequence(ClassLabel(names=NE_PER_NAME)),
                    "NE-PER-GENDER": Sequence(ClassLabel(names=NE_PER_GENDER)),
                    "NE-PER-LEGAL-STATUS": Sequence(ClassLabel(names=NE_PER_LEGAL_STATUS)),
                    "NE-PER-ROLE": Sequence(ClassLabel(names=NE_PER_ROLE)),
                    "NE-ORG-BENEFICIARY": Sequence(ClassLabel(names=NE_ORG_BENEFICIARY)),
                    "MISC": Value("string"),
                    "document_id": Value("string"),
                }
            ),
            homepage="TODO",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators for the train, validation and test files."""
        downloaded_files = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": downloaded_files["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": downloaded_files["dev"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files["test"]},
            ),
        ]

    def _generate_examples(self, filepath):
        document_id_re = re.compile(r"# document_path = ..(\/.*.txt)")
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            document_id = ""
            tokens = []
            ne_main = []
            ne_per_name = []
            ne_per_gender = []
            ne_per_legal_status = []
            ne_per_role = []
            ne_org_beneficiary = []
            misc = []
            for line in f:
                # Skip the column header line.
                if line.startswith("TOKEN"):
                    continue
                if line.startswith("#") or line.startswith("\t"):
                    # Comment/separator lines carry metadata such as the source
                    # document path and mark the end of the current example.
                    document_id_match = document_id_re.search(line)
                    if document_id_match:
                        document_id = document_id_match.group(1)
                    if not tokens:
                        continue
                    yield guid, {
                        "tokens": tokens,
                        "NE-MAIN": ne_main,
                        "NE-PER-NAME": ne_per_name,
                        "NE-PER-GENDER": ne_per_gender,
                        "NE-PER-LEGAL-STATUS": ne_per_legal_status,
                        "NE-PER-ROLE": ne_per_role,
                        "NE-ORG-BENEFICIARY": ne_org_beneficiary,
                        "MISC": misc,
                        "document_id": document_id,
                    }
                    guid += 1
                    tokens = []
                    ne_main = []
                    ne_per_name = []
                    ne_per_gender = []
                    ne_per_legal_status = []
                    ne_per_role = []
                    ne_org_beneficiary = []
                    misc = []
                else:
                    # Token lines are tab separated: the token followed by one
                    # label per annotation layer, with MISC in the last column.
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    ne_main.append(splits[1])
                    ne_per_name.append(splits[2])
                    ne_per_gender.append(splits[3])
                    ne_per_legal_status.append(splits[4])
                    ne_per_role.append(splits[5])
                    ne_org_beneficiary.append(splits[6])
                    misc.append(splits[-1].replace("\n", ""))
            # Yield the last example if the file does not end with a separator line.
            if tokens:
                yield guid, {
                    "tokens": tokens,
                    "NE-MAIN": ne_main,
                    "NE-PER-NAME": ne_per_name,
                    "NE-PER-GENDER": ne_per_gender,
                    "NE-PER-LEGAL-STATUS": ne_per_legal_status,
                    "NE-PER-ROLE": ne_per_role,
                    "NE-ORG-BENEFICIARY": ne_org_beneficiary,
                    "MISC": misc,
                    "document_id": document_id,
                }