Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: English
Size: 1K<n<10K
License:
File size: 3,373 bytes
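
For reference, a loading script like this one is consumed through datasets.load_dataset. The snippet below is a minimal usage sketch with a placeholder repository id (the dataset's actual Hub path is not shown on this page); recent versions of the datasets library also need trust_remote_code=True to execute a Python loading script. The full script follows.

# Minimal usage sketch; "username/indian_names" is a placeholder repo id.
from datasets import load_dataset

ds = load_dataset("username/indian_names", trust_remote_code=True)
print(ds["train"].features["ner_tags"].feature.names)  # ['B-PER', 'B-ORG']
print(ds["train"][0])  # {'id': '0', 'tokens': [...], 'ner_tags': [...]}
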
import datasets
logger = datasets.logging.get_logger(__name__)
# NOTE: this points to the GitHub HTML (blob) page for the file; a raw file
# URL (raw.githubusercontent.com/...) is normally required for
# dl_manager.download_and_extract to fetch the CSV contents rather than an
# HTML page.
_URL = "https://github.com/Kriyansparsana/demorepo/blob/f4501f1de2c759ee215952b2288e47ef5161f658/indian-name-org.csv"
class indian_namesConfig(datasets.BuilderConfig):
    """BuilderConfig for the Indian names NER dataset."""

    def __init__(self, **kwargs):
        """BuilderConfig for the Indian names NER dataset.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super(indian_namesConfig, self).__init__(**kwargs)
class indian_names(datasets.GeneratorBasedBuilder):
    """Indian person and organisation names NER dataset."""

    BUILDER_CONFIGS = [
        indian_namesConfig(
            name="indian_names",
            version=datasets.Version("1.0.0"),
            description="Indian person and organisation names NER dataset",
        ),
    ]
def _info(self):
return datasets.DatasetInfo(
features=datasets.Features(
{
"id": datasets.Value("string"),
"tokens": datasets.Sequence(datasets.Value("string")),
"ner_tags": datasets.Sequence(
datasets.features.ClassLabel(
names=[
"B-PER",
"B-ORG"
]
)
),
}
),
supervised_keys=None,
)
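    # Only two classes are declared above (B-PER and B-ORG): there is no "O"
    # tag and no I- continuation tags, so every token in the data file must
    # carry one of these two labels or ClassLabel encoding will fail.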
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
urls_to_download = {
"train": f"{_URL}",
}
downloaded_files = dl_manager.download_and_extract(urls_to_download)
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
]
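    # Expected input layout (CoNLL-style): one "token<TAB>label" pair per line,
    # with a blank line marking the end of a sentence. Note that the hosted
    # file is a .csv, while the loop below splits on tabs rather than commas,
    # so the data file has to follow this tab-separated layout.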
def _generate_examples(self, filepath):
logger.info("⏳ Generating examples from = %s", filepath)
with open(filepath, encoding="utf-8") as f:
current_tokens = []
current_labels = []
sentence_counter = 0
for row in f:
row = row.rstrip()
if row:
token, label = row.split("\t")
current_tokens.append(token)
current_labels.append(label)
else:
# New sentence
if not current_tokens:
# Consecutive empty lines will cause empty sentences
continue
                    assert len(current_tokens) == len(current_labels), "Mismatch between number of tokens and labels"
sentence = (
sentence_counter,
{
"id": str(sentence_counter),
"tokens": current_tokens,
"ner_tags": current_labels,
},
)
sentence_counter += 1
current_tokens = []
current_labels = []
yield sentence
# Don't forget last sentence in dataset 🧐
if current_tokens:
yield sentence_counter, {
"id": str(sentence_counter),
"tokens": current_tokens,
"ner_tags": current_labels,
}
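
The parsing loop above can be exercised without downloading anything. The sketch below feeds it a small, made-up tab-separated sample; the names and the temporary file are illustrative only and are not taken from the hosted CSV.

# Minimal self-test sketch; the sample sentences are invented for illustration.
if __name__ == "__main__":
    import os
    import tempfile

    sample = "Rohit\tB-PER\nSharma\tB-PER\n\nInfosys\tB-ORG\n"
    with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf-8") as tmp:
        tmp.write(sample)
        path = tmp.name

    builder = indian_names(cache_dir=tempfile.mkdtemp())
    for key, example in builder._generate_examples(path):
        print(key, example)
    os.remove(path)

    # Expected output (labels stay as strings here; they are encoded to
    # ClassLabel ids only when the dataset is actually built):
    #   0 {'id': '0', 'tokens': ['Rohit', 'Sharma'], 'ner_tags': ['B-PER', 'B-PER']}
    #   1 {'id': '1', 'tokens': ['Infosys'], 'ner_tags': ['B-ORG']}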