Languages: English
Multilinguality: monolingual
Size Categories: 1M<n<10M
Language Creators: found
Annotations Creators: machine-generated
Source Datasets: original
twitter_pos_vcb/twitter_pos_vcb.py
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{derczynski2013twitter,
title={Twitter part-of-speech tagging for all: Overcoming sparse and noisy data},
author={Derczynski, Leon and Ritter, Alan and Clark, Sam and Bontcheva, Kalina},
booktitle={Proceedings of the International Conference on Recent Advances in Natural Language Processing (RANLP 2013)},
pages={198--206},
year={2013}
}
"""
_DESCRIPTION = """\
Part-of-speech tagging is a basic NLP task. However, Twitter text
is difficult to part-of-speech tag: it is noisy, with linguistic errors and idiosyncratic style.
This dataset is the vote-constrained bootstrapped data generated to support state-of-the-art results:
about 1.5 million English tweets annotated for part-of-speech using Ritter's extension of the PTB tagset.
The tweets are from 2012 and 2013, tokenized using the GATE tokenizer and tagged
jointly using the CMU ARK tagger and Ritter's T-POS tagger. A tweet is added to the dataset
only when both taggers' outputs are completely compatible over the whole tweet.
This data is recommended for use as training data **only**, and not as evaluation data.
For more details see https://gate.ac.uk/wiki/twitter-postagger.html and https://aclanthology.org/R13-1026.pdf
"""
_URL = "http://downloads.gate.ac.uk/twitter/twitter_bootstrap_corpus.tar.gz"
_TRAINING_FILE = "gate_twitter_bootstrap_corpus.1543K.tokens"
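
# A hypothetical illustration of the file format, inferred from the parsing in
# `_generate_examples` below (this example line is invented, not taken from the
# corpus): one tweet per line, tokens separated by single spaces, each token
# joined to its tag by an underscore:
#
#   @some_user_USR loves_VBZ the_DT new_JJ phone_NN !_.
#
# Tokens may themselves contain underscores ("@some_user" above), so only the
# final underscore of each token_tag pair separates token from tag.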
class TwitterPosVcbConfig(datasets.BuilderConfig):
"""BuilderConfig for TwitterPosVcb"""
def __init__(self, **kwargs):
"""BuilderConfig forConll2003.
Args:
**kwargs: keyword arguments forwarded to super.
"""
super(TwitterPosVcbConfig, self).__init__(**kwargs)
class TwitterPosVcb(datasets.GeneratorBasedBuilder):
"""TwitterPosVcb dataset."""
BUILDER_CONFIGS = [
TwitterPosVcbConfig(name="twitter-pos-vcb", version=datasets.Version("1.0.0"), description="English Twitter PoS bootstrap dataset"),
]
def _info(self):
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"pos_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
'"',
"''",
"#",
"$",
"(",
")",
",",
".",
":",
"``",
"CC",
"CD",
"DT",
"EX",
"FW",
"IN",
"JJ",
"JJR",
"JJS",
"LS",
"MD",
"NN",
"NNP",
"NNPS",
"NNS",
"NN|SYM",
"PDT",
"POS",
"PRP",
"PRP$",
"RB",
"RBR",
"RBS",
"RP",
"SYM",
"TO",
"UH",
"VB",
"VBD",
"VBG",
"VBN",
"VBP",
"VBZ",
"WDT",
"WP",
"WP$",
"WRB",
"RT",
"HT",
"USR",
"URL",
]
)
),
}
),
supervised_keys=None,
homepage="https://gate.ac.uk/wiki/twitter-postagger.html",
citation=_CITATION,
)
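
    # Note: "pos_tags" values are ClassLabel integer ids, not strings. A minimal
    # decoding sketch, given a loaded split `ds` (uses the standard `datasets`
    # ClassLabel API):
    #
    #   tag_names = ds.features["pos_tags"].feature.names
    #   first_tweet_tags = [tag_names[i] for i in ds[0]["pos_tags"]]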
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
downloaded_file = dl_manager.download_and_extract(_URL)
data_files = {
"train": os.path.join(downloaded_file, _TRAINING_FILE),
}
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}),
]
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
guid = 0
for line in f:
tokens = []
pos_tags = []
if line.startswith("-DOCSTART-") or line.strip() == "" or line == "\n":
continue
else:
# twitter-pos-vcb gives one seq per line, as token_tag
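                    # A token may itself contain underscores, so only the final
                    # "_" separates token from tag; the join/split below is
                    # equivalent to token.rsplit("_", 1).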
annotated_words = line.strip().split(' ')
tokens = ['_'.join(token.split('_')[:-1]) for token in annotated_words]
pos_tags = [token.split('_')[-1] for token in annotated_words]
yield guid, {
"id": str(guid),
"tokens": tokens,
"pos_tags": pos_tags,
}
guid += 1
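
# Example usage (a minimal sketch; the dataset id below assumes this script is
# hosted on the Hugging Face Hub as "leondz/twitter_pos_vcb", so adjust it to
# wherever the script actually lives):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("leondz/twitter_pos_vcb", split="train")
#   example = ds[0]
#   print(example["tokens"])
#   print(example["pos_tags"])  # ClassLabel ids; see the decoding note above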