Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: English
Size: 1K<n<10K
License:
import datasets

logger = datasets.logging.get_logger(__name__)

_URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/wnut17train%20(1).conll"


class indian_namesConfig(datasets.BuilderConfig):
    """BuilderConfig for the indian_names dataset (based on the WNUT 17 Emerging Entities Dataset)."""

    def __init__(self, **kwargs):
        """BuilderConfig for indian_names.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(indian_namesConfig, self).__init__(**kwargs)


class indian_names(datasets.GeneratorBasedBuilder):
    """The WNUT 17 Emerging Entities Dataset."""

    BUILDER_CONFIGS = [
        indian_namesConfig(
            name="indian_names", version=datasets.Version("1.0.0"), description="The WNUT 17 Emerging Entities Dataset"
        ),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "tokens": datasets.Sequence(datasets.Value("string")),
                    "ner_tags": datasets.Sequence(
                        datasets.features.ClassLabel(
                            names=[
                                "O",
                                "B-corporation",
                                "I-corporation",
                                "B-person",
                            ]
                        )
                    ),
                }
            ),
            supervised_keys=None,
        )
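    # Illustrative note (not part of the original script): ClassLabel encodes the
    # tag strings above as integers in list order, i.e. "O" -> 0,
    # "B-corporation" -> 1, "I-corporation" -> 2, "B-person" -> 3. After loading,
    # the mapping can be inspected, e.g.:
    #   ds = datasets.load_dataset("path/to/this_script.py", split="train")  # path is hypothetical
    #   ds.features["ner_tags"].feature.int2str(3)  # -> "B-person"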
    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        urls_to_download = {
            "train": _URL,
        }
        # download_and_extract caches the remote file and returns the local path(s).
        downloaded_files = dl_manager.download_and_extract(urls_to_download)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
        ]
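    # The generator below expects WNUT-style CoNLL input: one "token<TAB>label"
    # pair per line, with a blank line separating sentences, e.g.
    #
    #   Rakesh	B-person
    #   works	O
    #   at	O
    #   Infosys	B-corporation
    #
    # (These sample tokens are illustrative, not taken from the dataset.)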
    def _generate_examples(self, filepath):
        logger.info("⏳ Generating examples from = %s", filepath)
        with open(filepath, encoding="utf-8") as f:
            current_tokens = []
            current_labels = []
            sentence_counter = 0
            for row in f:
                row = row.rstrip()
                if row:
                    # Only rows containing the tab delimiter are token/label pairs;
                    # anything else is silently skipped.
                    if "\t" in row:
                        token, label = row.split("\t")
                        current_tokens.append(token)
                        current_labels.append(label)
                else:
                    # A blank line marks the end of a sentence.
                    if not current_tokens:
                        # Consecutive empty lines would otherwise produce empty sentences.
                        continue
                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
                    sentence = (
                        sentence_counter,
                        {
                            "id": str(sentence_counter),
                            "tokens": current_tokens,
                            "ner_tags": current_labels,
                        },
                    )
                    sentence_counter += 1
                    current_tokens = []
                    current_labels = []
                    yield sentence
            # Don't forget the last sentence in the dataset 🧠
            if current_tokens:
                yield sentence_counter, {
                    "id": str(sentence_counter),
                    "tokens": current_tokens,
                    "ner_tags": current_labels,
                }
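A minimal usage sketch, assuming the script above is saved locally as indian_names.py (the filename is illustrative) and a datasets version that still supports script-based loaders:

from datasets import load_dataset

# Load the train split through the loading script defined above.
ds = load_dataset("indian_names.py", split="train")

print(ds[0]["tokens"])    # list of token strings
print(ds[0]["ner_tags"])  # parallel list of ClassLabel integer ids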