# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
# Modified by Vésteinn Snæbjarnarson 2021
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
import datasets

logger = datasets.logging.get_logger(__name__)

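# Full POS tagset used in the corpus: compact codes encoding word class and
# morphological features (gender, number, etc.; see _DESCRIPTION below).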
LABELS = [
"ACSFPA",
"ACSFPD",
"ACSFPN",
"ACSFSA",
"ACSFSD",
"ACSFSN",
"ACSMPA",
"ACSMPD",
"ACSMPN",
"ACSMSA",
"ACSMSD",
"ACSMSN",
"ACSNPA",
"ACSNPD",
"ACSNPN",
"ACSNSA",
"ACSNSD",
"ACSNSN",
"ACWFPA",
"ACWFPD",
"ACWFPN",
"ACWFSA",
"ACWFSD",
"ACWFSN",
"ACWMPD",
"ACWMPN",
"ACWMSA",
"ACWMSN",
"ACWNPA",
"ACWNPD",
"ACWNPN",
"ACWNSA",
"ACWNSN",
"AI",
"APSFPA",
"APSFPD",
"APSFPN",
"APSFSA",
"APSFSD",
"APSFSN",
"APSMPA",
"APSMPD",
"APSMPN",
"APSMSA",
"APSMSD",
"APSMSN",
"APSNPA",
"APSNPD",
"APSNPN",
"APSNSA",
"APSNSD",
"APSNSN",
"APWFPA",
"APWFPD",
"APWFPG",
"APWFPN",
"APWFSA",
"APWFSD",
"APWFSG",
"APWFSN",
"APWMPA",
"APWMPD",
"APWMPN",
"APWMSA",
"APWMSD",
"APWMSN",
"APWNPA",
"APWNPD",
"APWNPN",
"APWNSA",
"APWNSD",
"APWNSN",
"ASSFPD",
"ASSFPN",
"ASSFSA",
"ASSFSD",
"ASSFSN",
"ASSMPA",
"ASSMPN",
"ASSMSA",
"ASSMSD",
"ASSMSN",
"ASSNPA",
"ASSNPD",
"ASSNPN",
"ASSNSA",
"ASSNSD",
"ASSNSN",
"ASWFPA",
"ASWFPD",
"ASWFPN",
"ASWFSA",
"ASWFSD",
"ASWFSN",
"ASWMPA",
"ASWMPD",
"ASWMPN",
"ASWMSA",
"ASWMSD",
"ASWMSN",
"ASWNPA",
"ASWNPD",
"ASWNPN",
"ASWNSA",
"ASWNSD",
"ASWNSN",
"C",
"CI",
"CR",
"DCG",
"DCN",
"DG",
"DI",
"DN",
"DSG",
"DSN",
"F",
"KC",
"KE",
"KO",
"KQ",
"M",
"NC",
"NCFPA",
"NCFPD",
"NCFPN",
"NCFSA",
"NCFSN",
"NCMPA",
"NCMPD",
"NCMPG",
"NCMPN",
"NCMSA",
"NCMSN",
"NCNPA",
"NCNPD",
"NCNPN",
"NCNSA",
"NCNSD",
"NCNSN",
"NO",
"NP",
"NR",
"PBFPA",
"PBFPD",
"PBFPN",
"PBFSA",
"PBFSD",
"PBFSN",
"PBMPA",
"PBMPD",
"PBMPN",
"PBMSA",
"PBMSD",
"PBMSN",
"PBNPA",
"PBNPD",
"PBNPN",
"PBNSA",
"PBNSD",
"PBNSN",
"PDFPA",
"PDFPD",
"PDFPN",
"PDFSA",
"PDFSD",
"PDFSN",
"PDMPA",
"PDMPD",
"PDMPN",
"PDMSA",
"PDMSD",
"PDMSN",
"PDNPA",
"PDNPD",
"PDNPN",
"PDNSA",
"PDNSD",
"PDNSN",
"PEMPA",
"PEMSA",
"PENSA",
"PENSG",
"PIFPA",
"PIFPD",
"PIFPN",
"PIFSA",
"PIFSD",
"PIFSN",
"PIMPA",
"PIMPD",
"PIMPN",
"PIMSA",
"PIMSD",
"PIMSN",
"PINPA",
"PINPD",
"PINPN",
"PINSA",
"PINSD",
"PINSN",
"PP1PA",
"PP1PD",
"PP1PG",
"PP1PN",
"PP1SA",
"PP1SD",
"PP1SG",
"PP1SN",
"PP2PG",
"PP2PN",
"PP2SA",
"PP2SD",
"PP2SG",
"PP2SN",
"PPFPA",
"PPFPD",
"PPFPG",
"PPFPN",
"PPFSA",
"PPFSD",
"PPFSG",
"PPFSN",
"PPMPA",
"PPMPD",
"PPMPG",
"PPMPN",
"PPMSA",
"PPMSD",
"PPMSG",
"PPMSN",
"PPNPA",
"PPNPD",
"PPNPG",
"PPNPN",
"PPNSA",
"PPNSD",
"PPNSG",
"PPNSN",
"PQFPA",
"PQFPN",
"PQFSA",
"PQFSD",
"PQFSN",
"PQMPN",
"PQMSA",
"PQMSD",
"PQMSN",
"PQNSA",
"PQNSD",
"PQNSN",
"SFPA",
"SFPAA",
"SFPAP",
"SFPD",
"SFPDA",
"SFPDAP",
"SFPDP",
"SFPG",
"SFPGP",
"SFPN",
"SFPNA",
"SFPNP",
"SFSA",
"SFSAA",
"SFSAAP",
"SFSAP",
"SFSD",
"SFSDA",
"SFSDAP",
"SFSDP",
"SFSG",
"SFSGA",
"SFSGP",
"SFSN",
"SFSNA",
"SFSNAP",
"SFSNP",
"SMPA",
"SMPAA",
"SMPD",
"SMPDA",
"SMPDP",
"SMPG",
"SMPGA",
"SMPN",
"SMPNA",
"SMSA",
"SMSAA",
"SMSAP",
"SMSD",
"SMSDA",
"SMSDAP",
"SMSDP",
"SMSG",
"SMSGA",
"SMSGP",
"SMSN",
"SMSNA",
"SMSNAP",
"SMSNP",
"SNPA",
"SNPAA",
"SNPD",
"SNPDA",
"SNPDP",
"SNPG",
"SNPGA",
"SNPN",
"SNPNA",
"SNPNP",
"SNSA",
"SNSAA",
"SNSAAP",
"SNSAP",
"SNSD",
"SNSDA",
"SNSDAP",
"SNSDP",
"SNSG",
"SNSGA",
"SNSGP",
"SNSN",
"SNSNA",
"SNSNAP",
"SNSNP",
"SX",
"SXP",
"SXSD",
"SXSG",
"TS",
"TT",
"VAFPA",
"VAFPD",
"VAFPN",
"VAFSA",
"VAFSD",
"VAFSN",
"VAMPA",
"VAMPD",
"VAMPN",
"VAMSA",
"VAMSD",
"VAMSN",
"VANPA",
"VANPD",
"VANPN",
"VANSA",
"VANSD",
"VANSN",
"VE",
"VEAP",
"VEAS2",
"VEAS3",
"VEPP",
"VEPS1",
"VEPS2",
"VEPS3",
"VI",
"VMP",
"VMS",
"VNAP",
"VNAS1",
"VNAS2",
"VNAS3",
"VNPP",
"VNPS1",
"VNPS2",
"VNPS3",
"VP",
"W",
"X"
]
_CITATION = """\
@misc{sosialurin-pos,
title = {Marking av teldutøkum tekstsavn},
author = {Zakaris Svabo Hansen, Heini Justinussen, and Mortan
Ólason},
url = {http://ark.axeltra.com/index.php?type=person&lng=en&id=18},
year = {2004} }
"""
_DESCRIPTION = """\
The corpus that has been created consists of ca. 100.000 words of text from the [Faroese] newspaper Sosialurin. Each word is tagged with grammatical information (word class, gender, number etc.)
"""
_URL = "https://huggingface.co/datasets/vesteinn/sosialurin-faroese-pos/raw/main/"
_TRAINING_FILE = "fo.revised.txt"
class SosialurinPOSConfig(datasets.BuilderConfig):
"""BuilderConfig for sosialurin-faroese-pos"""
def __init__(self, **kwargs):
"""BuilderConfig for sosialurin-faroese-pos.
Args:
**kwargs: keyword arguments forwarded to super.
"""
        super().__init__(**kwargs)
class SosialurinPOS(datasets.GeneratorBasedBuilder):
"""sosialurin-faroese-pos dataset."""
BUILDER_CONFIGS = [
SosialurinPOSConfig(name="sosialurin-faroese-pos", version=datasets.Version("0.1.0"), description="sosialurin-faroese-pos dataset"),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"pos_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=LABELS
)
),
}
),
supervised_keys=None,
homepage="http://ark.axeltra.com/index.php?type=person&lng=en&id=18",
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": f"{_URL}{_TRAINING_FILE}",
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
]
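    # The training file is expected in a CoNLL-like format: one
    # "token<TAB>tag" pair per line, with blank lines (and -DOCSTART-
    # markers) separating sentences.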
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
guid = 0
tokens = []
pos_tags = []
for line in f:
if line.startswith("-DOCSTART-") or line == "" or line == "\n":
if tokens:
yield guid, {
"id": str(guid),
"tokens": tokens,
"pos_tags": pos_tags,
}
guid += 1
tokens = []
pos_tags = []
else:
                    # each line holds a token and its tag, tab separated
                    splits = line.split("\t")
                    tokens.append(splits[0])
                    try:
                        pos_tags.append(splits[1].rstrip())
                    except IndexError:
                        # log the malformed line instead of printing it
                        logger.error("Malformed line, expected 'token<TAB>tag': %r", line)
                        raise
            # last example (skip if the file ends on a blank line)
            if tokens:
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                }
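
# A minimal usage sketch (an assumption, not part of the loader): load the
# dataset through the `datasets` library and map ClassLabel ids back to tag
# strings. The repository id below is taken from _URL.
if __name__ == "__main__":
    ds = datasets.load_dataset("vesteinn/sosialurin-faroese-pos")
    sample = ds["train"][0]
    # pos_tags are stored as ClassLabel ids; int2str recovers the tag names
    tag_feature = ds["train"].features["pos_tags"].feature
    print(sample["tokens"])
    print([tag_feature.int2str(t) for t in sample["pos_tags"]])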