Dataset: tner/tweetner7 (Languages: English; Multilinguality: monolingual; Size: 1K<n<10K)
tweetner7 / tweetner7.py
""" NER dataset compiled by T-NER library https://github.com/asahi417/tner/tree/master/tner """
import json
from itertools import chain
import datasets
logger = datasets.logging.get_logger(__name__)
_DESCRIPTION = """[TweetNER7](TBA)"""
_NAME = "tweetner7"
_VERSION = "1.0.0"
_CITATION = """
TBA
"""
_HOME_PAGE = "https://github.com/asahi417/tner"
_URL = f'https://huggingface.co/datasets/tner/{_NAME}/raw/main/dataset'
_URLS = {
    str(datasets.Split.TEST): [f'{_URL}/2021.test.json'],
    str(datasets.Split.VALIDATION): [f'{_URL}/2020.dev.json'],
    str(datasets.Split.TRAIN): [f'{_URL}/2020.train.json'],
    f'{str(datasets.Split.TEST)}_2020': [f'{_URL}/2020.test.json'],
    f'{str(datasets.Split.TEST)}_2021': [f'{_URL}/2021.test.json'],
    f'{str(datasets.Split.VALIDATION)}_2020': [f'{_URL}/2020.dev.json'],
    f'{str(datasets.Split.VALIDATION)}_2021': [f'{_URL}/2021.dev.json'],
    f'{str(datasets.Split.TRAIN)}_2020': [f'{_URL}/2020.train.json'],
    f'{str(datasets.Split.TRAIN)}_2021': [f'{_URL}/2021.train.json'],
    f'{str(datasets.Split.VALIDATION)}_random': [f'{_URL}/random.dev.json'],
    f'{str(datasets.Split.TRAIN)}_random': [f'{_URL}/random.train.json'],
    'extra_2020': [f'{_URL}/extra/2020.extra{i:02d}.json' for i in range(9)],
    'extra_2021': [f'{_URL}/extra/2021.extra{i:02d}.json' for i in range(10)]
}


class TweetNER7Config(datasets.BuilderConfig):
    """BuilderConfig for TweetNER7."""

    def __init__(self, **kwargs):
        """BuilderConfig.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(TweetNER7Config, self).__init__(**kwargs)


class TweetNER7(datasets.GeneratorBasedBuilder):
    """TweetNER7 dataset."""

    BUILDER_CONFIGS = [
        TweetNER7Config(name=_NAME, version=datasets.Version(_VERSION), description=_DESCRIPTION),
    ]

    def _split_generators(self, dl_manager):
        # Only the canonical splits are exposed here: 2020 train/validation and the 2021 test set.
        downloaded_file = dl_manager.download_and_extract(_URLS)
        return [datasets.SplitGenerator(name=i, gen_kwargs={"filepaths": downloaded_file[str(i)]})
                for i in [datasets.Split.TRAIN, datasets.Split.VALIDATION, datasets.Split.TEST]]

    def _generate_examples(self, filepaths):
        _key = 0
        for filepath in filepaths:
            logger.info(f"generating examples from = {filepath}")
            with open(filepath, encoding="utf-8") as f:
                # Each file is in JSON-lines format: one example per non-empty line.
                _list = [i for i in f.read().split('\n') if len(i) > 0]
                for i in _list:
                    data = json.loads(i)
                    yield _key, data
                    _key += 1

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "tags": datasets.Sequence(datasets.Value("int32")),
                    "id": datasets.Value("string"),
                    "date": datasets.Value("string")
                }
            ),
            supervised_keys=None,
            homepage=_HOME_PAGE,
            citation=_CITATION,
        )
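

# Example usage (a minimal sketch, not part of the original loader): assuming the
# `datasets` library is installed and this script is published on the Hub as
# "tner/tweetner7", the default splits can be loaded and inspected as follows.
if __name__ == "__main__":
    dataset = datasets.load_dataset("tner/tweetner7")
    # Each example carries a list of tokens, integer tag ids, a tweet id, and a date string.
    print(dataset["train"][0])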