Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: English
Size: 1K<n<10K
License:
File size: 3,402 Bytes
import datasets
logger = datasets.logging.get_logger(__name__)
# Base URL for the data file. This is the raw-content form of the repository URL;
# the GitHub "blob" page used originally serves HTML rather than the raw .conll file.
_URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/"
_TRAINING_FILE = "indinan_namestrain.conll"
class indian_namesConfig(datasets.BuilderConfig):
    """BuilderConfig for the Indian names NER dataset (loader adapted from WNUT 17)."""

    def __init__(self, **kwargs):
        """BuilderConfig for the Indian names dataset.
        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(indian_namesConfig, self).__init__(**kwargs)
class WNUT_17(datasets.GeneratorBasedBuilder):
    """Indian names NER dataset, built with a generator adapted from the WNUT 17 Emerging Entities script."""
BUILDER_CONFIGS = [
indian_namesConfig(
name="wnut_17", version=datasets.Version("1.0.0"), description="The WNUT 17 Emerging Entities Dataset"
),
]
def _info(self):
return datasets.DatasetInfo(
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"O",
"B-PER",
"B-ORG"
]
)
),
}
),
supervised_keys=None,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": f"{_URL}{_TRAINING_FILE}",
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
]
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
current_tokens = []
current_labels = []
sentence_counter = 0
for row in f:
row = row.rstrip()
if row:
token, label = row.split("\t")
current_tokens.append(token)
current_labels.append(label)
else:
# New sentence
if not current_tokens:
# Consecutive empty lines will cause empty sentences
continue
assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
sentence = (
sentence_counter,
{
"id": str(sentence_counter),
"tokens": current_tokens,
"ner_tags": current_labels,
},
)
sentence_counter += 1
current_tokens = []
current_labels = []
yield sentence
# Don't forget last sentence in dataset 🧐
if current_tokens:
yield sentence_counter, {
"id": str(sentence_counter),
"tokens": current_tokens,
"ner_tags": current_labels,
                }
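
For reference, a minimal sketch of the tab-separated CoNLL-style input the generator above expects: one token and one label per line separated by a tab, a blank line between sentences, and labels drawn from O, B-PER, and B-ORG. The tokens below are purely illustrative and are not taken from the actual indinan_namestrain.conll file.

Ramesh	B-PER
Kumar	B-PER
works	O
at	O
Infosys	B-ORG

Priya	B-PER
visited	O
Chennai	O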
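
A hedged usage sketch follows: assuming the script is saved locally under a hypothetical file name, it can be loaded with datasets.load_dataset. The Sequence(ClassLabel) feature encodes the string tags as integer ids, which can be mapped back through the feature's names list. Depending on the installed datasets version, loading a script may additionally require trust_remote_code=True.

from datasets import load_dataset

# Hypothetical local path to the loading script above; recent datasets
# versions may also need trust_remote_code=True for script-based datasets.
ds = load_dataset("./indian_names.py")

example = ds["train"][0]
print(example["tokens"])    # list of token strings
print(example["ner_tags"])  # integer ids into ["O", "B-PER", "B-ORG"]

# Map the integer ids back to their string labels.
label_names = ds["train"].features["ner_tags"].feature.names
print([label_names[i] for i in example["ner_tags"]])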