Datasets:
Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: English
Size: 1K<n<10K
License:
Update indian_names.py

indian_names.py (+44 -28)
```diff
@@ -2,7 +2,7 @@ import datasets
 
 logger = datasets.logging.get_logger(__name__)
 
-_URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/…
+_URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/wnut17train%20(1).conll"
 
 class indian_namesConfig(datasets.BuilderConfig):
     """The WNUT 17 Emerging Entities Dataset."""
@@ -32,8 +32,11 @@ class indian_names(datasets.GeneratorBasedBuilder):
                     "ner_tags": datasets.Sequence(
                         datasets.features.ClassLabel(
                             names=[
-                                "…
-                                "B-…
+                                "O",
+                                "B-corporation",
+                                "I-corporation",
+                                "B-person",
+                                "I-person"
                             ]
                         )
                     ),
@@ -52,33 +55,46 @@
         return [
             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
         ]
-
+
     def _generate_examples(self, filepath):
         logger.info("⏳ Generating examples from = %s", filepath)
         with open(filepath, encoding="utf-8") as f:
-… [14 old lines (the previous parsing loop), truncated in this capture]
+            current_tokens = []
+            current_labels = []
+            sentence_counter = 0
+            for row in f:
+                row = row.rstrip()
+                if row:
+                    if "\t" in row:
+                        token, label = row.split("\t")
+                        current_tokens.append(token)
+                        current_labels.append(label)
+                    else:
+                        # Handle cases where the delimiter is missing
+                        # You can choose to skip these rows or handle them differently
+                        logger.warning(f"Delimiter missing in row: {row}")
                 else:
-… [10 old lines (the previous sentence-building code), truncated in this capture]
+                    # New sentence
+                    if not current_tokens:
+                        # Consecutive empty lines will cause empty sentences
+                        continue
+                    assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
+                    sentence = (
+                        sentence_counter,
+                        {
+                            "id": str(sentence_counter),
+                            "tokens": current_tokens,
+                            "ner_tags": current_labels,
+                        },
+                    )
+                    sentence_counter += 1
+                    current_tokens = []
+                    current_labels = []
+                    yield sentence
+            # Don't forget the last sentence in the dataset 🧐
+            if current_tokens:
+                yield sentence_counter, {
+                    "id": str(sentence_counter),
+                    "tokens": current_tokens,
+                    "ner_tags": current_labels,
                 }
```
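The rewritten `_generate_examples` expects CoNLL-style input: one `token<TAB>tag` pair per line, with a blank line ending each sentence. A hypothetical fragment in the shape the loop consumes (tokens and names invented for illustration, not taken from `wnut17train%20(1).conll`):

```text
Rahul	B-person
Sharma	I-person
works	O
at	O
Infosys	B-corporation

Priya	B-person
visited	O
Wipro	B-corporation
```

Note that the new guard only checks that a tab is present; a row containing more than one tab would still raise a ValueError when unpacked by `row.split("\t")`.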
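The expanded `names` list is the full tag vocabulary; `datasets.ClassLabel` stores each tag as its integer position in that list, which is how the string labels collected by the generator end up encoded. A standalone sketch of the mapping (not part of the loader itself):

```python
from datasets import ClassLabel

# Same five tags the commit adds to the loader's ner_tags feature.
ner_tags = ClassLabel(names=["O", "B-corporation", "I-corporation", "B-person", "I-person"])

print(ner_tags.str2int("B-person"))  # 3 (its position in names)
print(ner_tags.int2str(1))           # "B-corporation"
```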
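With the corrected `_URL` and the completed generator, the script should now load end to end. A minimal usage sketch, assuming a local copy of the script (the path is an assumption, and newer `datasets` releases may additionally require `trust_remote_code=True` for script-based loaders):

```python
from datasets import load_dataset

# Assumed local path to the loader script shown in this commit.
dataset = load_dataset("./indian_names.py", split="train")

example = dataset[0]
print(example["tokens"])    # list of token strings
print(example["ner_tags"])  # tags encoded as ClassLabel integer ids
```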