twitter_pos / twitter_pos.py
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Introduction to the CoNLL-2003 Shared Task: Language-Independent Named Entity Recognition"""
import os
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = """\
@inproceedings{ritter2011named,
title={Named entity recognition in tweets: an experimental study},
author={Ritter, Alan and Clark, Sam and Etzioni, Oren and others},
booktitle={Proceedings of the 2011 conference on empirical methods in natural language processing},
pages={1524--1534},
year={2011}
}
@inproceedings{foster2011hardtoparse,
title={\# hardtoparse: POS Tagging and Parsing the Twitterverse},
author={Foster, Jennifer and Cetinoglu, Ozlem and Wagner, Joachim and Le Roux, Joseph and Hogan, Stephen and Nivre, Joakim and Hogan, Deirdre and Van Genabith, Josef},
booktitle={Workshops at the Twenty-Fifth AAAI Conference on Artificial Intelligence},
year={2011}
}
@inproceedings{derczynski2013twitter,
title={Twitter part-of-speech tagging for all: Overcoming sparse and noisy data},
author={Derczynski, Leon and Ritter, Alan and Clark, Sam and Bontcheva, Kalina},
booktitle={Proceedings of the international conference recent advances in natural language processing ranlp 2013},
pages={198--206},
year={2013}
}
"""
_DESCRIPTION = """\
Part-of-speech tagging is a fundamental NLP task. However, Twitter text
is difficult to part-of-speech tag: it is noisy, with linguistic errors and an idiosyncratic style.
This dataset contains two English PoS-tagged tweet corpora:
* Ritter, with train/dev/test splits
* Foster, with dev/test splits
The splits are those defined in the Derczynski et al. paper; the data itself comes from Ritter and Foster.
For more details see:
* https://gate.ac.uk/wiki/twitter-postagger.html
* https://aclanthology.org/D11-1141.pdf
* https://www.aaai.org/ocs/index.php/ws/aaaiw11/paper/download/3912/4191
"""
_URL = "http://downloads.gate.ac.uk/twitie/twitie-tagger.zip"
_RITTER_TRAIN = "twitie-tagger/corpora/ritter_train.stanford"
_RITTER_DEV = "twitie-tagger/corpora/ritter_dev.stanford"
_RITTER_TEST = "twitie-tagger/corpora/ritter_eval.stanford"
_FOSTER_TRAIN = None
_FOSTER_DEV = "twitie-tagger/corpora/foster_dev.stanford"
_FOSTER_TEST = "twitie-tagger/corpora/foster_eval.stanford"
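# Each .stanford corpus file holds one tweet per line as space-separated
# token_TAG pairs; for instance (an illustrative, made-up line):
#
#     I_PRP love_VBP New_York_NNP !_.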
class TwitterPosConfig(datasets.BuilderConfig):
    """BuilderConfig for TwitterPos."""

    def __init__(self, **kwargs):
        """BuilderConfig for TwitterPos.

        Args:
          **kwargs: keyword arguments forwarded to super.
        """
        super(TwitterPosConfig, self).__init__(**kwargs)
        assert self.name in ("foster", "ritter"), f"Unrecognised variation: {self.name}"


class TwitterPos(datasets.GeneratorBasedBuilder):
    """TwitterPos dataset."""

    BUILDER_CONFIGS = [
        TwitterPosConfig(name="foster", description="Foster English Twitter PoS bootstrap dataset"),
        TwitterPosConfig(name="ritter", description="Ritter English Twitter PoS bootstrap dataset"),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "pos_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                '"',
                                "''",
                                "#",
                                "%",
                                "$",
                                "(",
                                ")",
                                ",",
                                ".",
                                ":",
                                "``",
                                "CC",
                                "CD",
                                "DT",
                                "EX",
                                "FW",
                                "IN",
                                "JJ",
                                "JJR",
                                "JJS",
                                "LS",
                                "MD",
                                "NN",
                                "NNP",
                                "NNPS",
                                "NNS",
                                "NN|SYM",
                                "PDT",
                                "POS",
                                "PRP",
                                "PRP$",
                                "RB",
                                "RBR",
                                "RBS",
                                "RP",
                                "SYM",
                                "TO",
                                "UH",
                                "VB",
                                "VBD",
                                "VBG",
                                "VBN",
                                "VBP",
                                "VBZ",
                                "WDT",
                                "WP",
                                "WP$",
                                "WRB",
                                "RT",
                                "HT",
                                "USR",
                                "URL",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://gate.ac.uk/wiki/twitter-postagger.html",
            citation=_CITATION,
        )
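    # Note: `pos_tags` values are ClassLabel ids, not strings. An illustrative
    # sketch of recovering tag names from a loaded split `ds` (a hypothetical
    # variable, using the standard `datasets` feature API):
    #
    #     tag_names = ds.features["pos_tags"].feature.names
    #     first_tags = [tag_names[i] for i in ds[0]["pos_tags"]]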
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        downloaded_file = dl_manager.download_and_extract(_URL)
        if self.config.name == 'ritter':
            data_files = {
                "train": os.path.join(downloaded_file, _RITTER_TRAIN),
                "dev": os.path.join(downloaded_file, _RITTER_DEV),
                "test": os.path.join(downloaded_file, _RITTER_TEST),
            }
        elif self.config.name == 'foster':
            data_files = {
                "dev": os.path.join(downloaded_file, _FOSTER_DEV),
                "test": os.path.join(downloaded_file, _FOSTER_TEST),
            }
        splits = [
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": data_files["dev"]}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": data_files["test"]}),
        ]
        # Only the Ritter variant ships a training split (_FOSTER_TRAIN is None).
        if "train" in data_files:
            splits.append(datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": data_files["train"]}))
        return splits
    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            guid = 0
            for line in f:
                if line.startswith("-DOCSTART-") or line.strip() == "":
                    continue
                # tag type fixes for known annotation errors in the source files
                line = line.replace('_VPB ', '_VBP ')
                line = line.replace('_TD ', '_DT ')
                line = line.replace('_ADVP ', '_RB ')
                line = line.replace('_NONE ', '_: ')
                line = line.replace(' please_VPP ', ' please_VBP ')
                line = line.replace(' ".._O ', ' ".._" ')
                # twitter-pos gives one sequence per line, as token_tag pairs
                annotated_words = line.strip().split(' ')
                tokens = ['_'.join(token.split('_')[:-1]) for token in annotated_words]
                pos_tags = [token.split('_')[-1] for token in annotated_words]
                yield guid, {
                    "id": str(guid),
                    "tokens": tokens,
                    "pos_tags": pos_tags,
                }
                guid += 1
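# Parsing sketch: tokens themselves may contain underscores, so everything
# before the last '_' is the token and the final field is the tag. A plain
# Python illustration (no dataset access needed):
#
#     >>> word = "New_York_NNP"
#     >>> "_".join(word.split("_")[:-1]), word.split("_")[-1]
#     ('New_York', 'NNP')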