Kriyans committed on
Commit 0eeb458
1 Parent(s): 9519aa3

Upload ner.py

Files changed (1)
  1. ner.py +107 -0
ner.py ADDED
@@ -0,0 +1,107 @@
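+ # Expected input format (WNUT/CoNLL style): one token per line followed by a
+ # tab and its tag, with a blank line between sentences. Illustrative example
+ # (not taken from the actual data files):
+ #
+ #     Ramesh<TAB>B-person
+ #     Kumar<TAB>I-person
+ #     joined<TAB>O
+ #     Infosys<TAB>B-corporation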
+ import datasets
+
+ logger = datasets.logging.get_logger(__name__)
+
+ _URL = "https://raw.githubusercontent.com/Kriyansparsana/demorepo/main/"
+ _TRAINING_FILE = "wnut17train%20(1).conll"
+ _DEV_FILE = "indian_ner_dev.conll"
+ _TEST_FILE = "indian_ner_test.conll"
+
+ class indian_namesConfig(datasets.BuilderConfig):
+     """BuilderConfig for the indian_names NER dataset (adapted from the WNUT 17 loader)."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for indian_names.
+         Args:
+             **kwargs: keyword arguments forwarded to super.
+         """
+         super(indian_namesConfig, self).__init__(**kwargs)
+
+ class indian_names(datasets.GeneratorBasedBuilder):
+     """NER dataset of Indian person and corporation names, adapted from the WNUT 17 Emerging Entities loader."""
+
+     BUILDER_CONFIGS = [
+         indian_namesConfig(
+             name="indian_names",
+             version=datasets.Version("1.0.0"),
+             description="NER dataset of Indian person and corporation names",
+         ),
+     ]
+
+     def _info(self):
+         return datasets.DatasetInfo(
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "tokens": datasets.Sequence(datasets.Value("string")),
+                     "ner_tags": datasets.Sequence(
+                         datasets.features.ClassLabel(
+                             names=[
+                                 "O",
+                                 "B-corporation",
+                                 "I-corporation",
+                                 "B-person",
+                                 "I-person",
+                             ]
+                         )
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         urls_to_download = {
+             "train": f"{_URL}{_TRAINING_FILE}",
+             "dev": f"{_URL}{_DEV_FILE}",
+             "test": f"{_URL}{_TEST_FILE}",
+         }
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+             datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": downloaded_files["test"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         logger.info("⏳ Generating examples from = %s", filepath)
+         with open(filepath, encoding="utf-8") as f:
+             current_tokens = []
+             current_labels = []
+             sentence_counter = 0
+             for row in f:
+                 row = row.rstrip()
+                 if row:
+                     if "\t" in row:
+                         # Split on the first tab only, so a stray tab in the
+                         # label column cannot raise a ValueError.
+                         token, label = row.split("\t", 1)
+                         current_tokens.append(token)
+                         current_labels.append(label)
+                     else:
+                         # Rows without a tab delimiter are malformed;
+                         # log and skip them instead of crashing.
+                         logger.warning("Delimiter missing in row: %s", row)
+                 else:
+                     # A blank line marks a sentence boundary.
+                     if not current_tokens:
+                         # Consecutive blank lines would yield empty sentences; skip them.
+                         continue
+                     assert len(current_tokens) == len(current_labels), "💔 between len of tokens & labels"
+                     sentence = (
+                         sentence_counter,
+                         {
+                             "id": str(sentence_counter),
+                             "tokens": current_tokens,
+                             "ner_tags": current_labels,
+                         },
+                     )
+                     sentence_counter += 1
+                     current_tokens = []
+                     current_labels = []
+                     yield sentence
+             # Don't forget the last sentence in the dataset 🧐
+             if current_tokens:
+                 yield sentence_counter, {
+                     "id": str(sentence_counter),
+                     "tokens": current_tokens,
+                     "ner_tags": current_labels,
+                 }
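With the script in place, the builder can be exercised locally before relying on the hosted files. A minimal sketch, assuming ner.py sits in the working directory and a `datasets` version that still supports Python loading scripts (newer releases may additionally require trust_remote_code=True):

    import datasets

    # Builds the train/validation/test splits defined in _split_generators.
    dataset = datasets.load_dataset("./ner.py")
    print(dataset["train"][0])
    # e.g. {'id': '0', 'tokens': ['Ramesh', ...], 'ner_tags': [3, ...]}

Because "ner_tags" is declared as a ClassLabel sequence, labels come back as integer indices into the five-name list above (here 3 = "B-person"); dataset["train"].features["ner_tags"].feature.int2str() converts them back to strings.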