Tasks: Token Classification
Sub-tasks: named-entity-recognition
Languages: English
Multilinguality: monolingual
Size Categories: 1K<n<10K
Language Creators: found
Annotations Creators: crowdsourced
Source Datasets: original
License: unspecified
Update ner.py
ner.py CHANGED
@@ -3,9 +3,9 @@ import datasets
 logger = datasets.logging.get_logger(__name__)
 
 _URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/"
-_TRAINING_FILE = "
+_TRAINING_FILE = "Indian_dataset_wnut_train.conll"
 # _DEV_FILE = "indian_dataset.conll"
-
+_TEST_FILE = "emerging.test.annotated"
 
 class indian_namesConfig(datasets.BuilderConfig):
     """The WNUT 17 Emerging Entities Dataset."""
@@ -38,8 +38,17 @@ class indian_names(datasets.GeneratorBasedBuilder):
                                 "O",
                                 "B-corporation",
                                 "I-corporation",
+                                "B-creative-work",
+                                "I-creative-work",
+                                "B-group",
+                                "I-group",
+                                "B-location",
+                                "I-location",
                                 "B-person",
-                                "I-person",
+                                "I-person",
+                                "B-product",
+                                "I-product",
+                            ]
                         )
                     ),
                 }
@@ -52,14 +61,14 @@ class indian_names(datasets.GeneratorBasedBuilder):
         urls_to_download = {
             "train": f"{_URL}{_TRAINING_FILE}",
             # "dev": f"{_URL}{_DEV_FILE}",
-
+            "test": f"{_URL}{_TEST_FILE}",
         }
         downloaded_files = dl_manager.download_and_extract(urls_to_download)
 
         return [
             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
             # datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
-
+            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
         ]
 
     def _generate_examples(self, filepath):
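For context, this is how the ner_tags feature reads once the new labels and the closing bracket from this commit are in place. The surrounding _info() code is not part of the diff, so the layout below, including the id and tokens columns, is a sketch borrowed from the upstream WNUT 17 loading script that this file is based on, not a verbatim excerpt of ner.py.

import datasets

# Sketch of the feature schema after this commit; "id" and "tokens" are assumed from the
# upstream WNUT 17 script and are not shown in the diff itself.
features = datasets.Features(
    {
        "id": datasets.Value("string"),
        "tokens": datasets.Sequence(datasets.Value("string")),
        "ner_tags": datasets.Sequence(
            datasets.features.ClassLabel(
                names=[
                    "O",
                    "B-corporation",
                    "I-corporation",
                    "B-creative-work",
                    "I-creative-work",
                    "B-group",
                    "I-group",
                    "B-location",
                    "I-location",
                    "B-person",
                    "I-person",
                    "B-product",
                    "I-product",
                ]
            )
        ),
    }
)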
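The diff stops at the _generate_examples(self, filepath) signature without showing its body. The sketch below illustrates the kind of CoNLL parsing such a method typically performs on the downloaded files; the helper name, the tab delimiter, and the token/tag column order are assumptions, not something this commit confirms.

# Hypothetical helper sketching the usual WNUT-style CoNLL parsing loop:
# one "token<TAB>tag" pair per line, with a blank line ending each sentence.
def parse_conll_examples(filepath):
    with open(filepath, encoding="utf-8") as f:
        guid = 0
        tokens, ner_tags = [], []
        for line in f:
            line = line.rstrip("\n")
            if not line:
                # Blank line: emit the sentence collected so far, if any.
                if tokens:
                    yield guid, {"id": str(guid), "tokens": tokens, "ner_tags": ner_tags}
                    guid += 1
                    tokens, ner_tags = [], []
                continue
            token, tag = line.split("\t")  # tab-separated columns are an assumption
            tokens.append(token)
            ner_tags.append(tag)
        # Emit the final sentence if the file does not end with a blank line.
        if tokens:
            yield guid, {"id": str(guid), "tokens": tokens, "ner_tags": ner_tags}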
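A quick way to sanity-check the change is to load the script locally and confirm that the new test split and the expanded label set show up. The local path and the trust_remote_code flag below are assumptions about how the script is consumed, not part of this commit.

from datasets import load_dataset

# Load the updated loading script from a local checkout; recent datasets releases
# require trust_remote_code=True for script-based datasets.
ds = load_dataset("./ner.py", trust_remote_code=True)

print(ds)  # should now report both a "train" and a "test" split
print(ds["train"].features["ner_tags"].feature.names)  # the 13 labels listed above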