madhavkotecha committed on
Commit
ce62bb3
1 Parent(s): 89435c1

Update app.py

Files changed (1)
  1. app.py +59 -64
app.py CHANGED
@@ -12,7 +12,6 @@ from tqdm import tqdm
 import gradio as gr
 import matplotlib.pyplot as plt
 from sklearn import metrics
-from sklearn.model_selection import KFold
 
 nltk.download('stopwords')
 nltk.download('punkt_tab')
@@ -22,12 +21,19 @@ PUNCT = set([".", ",", "!", "?", ":", ";", "-", "(", ")", "[", "]", "{", "}", "'
 Features_count = 6
 SEED = 42
 
+SW = set(nltk.corpus.stopwords.words("english"))
+PUNCT = set([".", ",", "!", "?", ":", ";", "-", "(", ")", "[", "]", "{", "}", "'", '"'])
+connectors = set(["of", "in", "and", "for", "to", "with", "at", "from"])
+start_words = set(["the", "a", "an", "this", "that", "these", "those", "my", "your", "his", "her", "its", "our", "their", "few", "many", "several", "all", "most", "some", "any", "every", "each", "either", "neither", "both", "another", "other", "more", "less", "fewer", "little", "much", "great", "good", "bad", "first", "second", "third", "last", "next", "previous"])
+Features_count = 6
+SEED = 42
+
 class NEI:
     def __init__(self):
         self.model = None
         self.scaler = StandardScaler()
         self.vectorizer = DictVectorizer(sparse=True)
-        self.tagset = ['Name[1]', 'No-Name[0]']
+        self.tagset = ['No-Name[0]', 'Name[1]']
 
     def load_dataset(self, file):
         sentences = []
@@ -48,51 +54,6 @@ class NEI:
             sentences.append(sentence)
         return sentences
 
-    def sent2features(self, sentence):
-        return [self.word2features(sentence, i) for i in range(len(sentence))]
-
-    def sent2labels(self, sentence):
-        return [label for _, _, label in sentence]
-
-    def word2features(self, sentence, i):
-        word = sentence[i][0]
-        pos_tag = sentence[i][1]
-        features = {
-            'word': word,
-            'pos_tag': pos_tag,
-            'word.isupper': int(word.isupper()),
-            'word.islower': int(word.islower()),
-            'word.istitle': int(word.istitle()),
-            'word.isdigit': int(word.isdigit()),
-            'word.prefix2': word[:2],
-            'word.prefix3': word[:3],
-            'word.suffix2': word[-2:],
-            'word.suffix3': word[-3:],
-        }
-        # Add context features
-        if i > 0:
-            prv_word = sentence[i - 1][0]
-            prv_pos_tag = sentence[i - 1][1]
-            features.update({
-                '-1:word': prv_word,
-                '-1:pos_tag': prv_pos_tag,
-                '-1:word.isupper': int(prv_word.isupper()),
-                '-1:word.istitle': int(prv_word.istitle()),
-            })
-        else:
-            features['BOS'] = True
-        if i < len(sentence) - 1:
-            next_word = sentence[i + 1][0]
-            next_pos_tag = sentence[i + 1][1]
-            features.update({
-                '+1:word': next_word,
-                '+1:pos_tag': next_pos_tag,
-                '+1:word.isupper': int(next_word.isupper()),
-                '+1:word.istitle': int(next_word.istitle()),
-            })
-        else:
-            features['EOS'] = True
-        return features
 
     def performance(self, y_true, y_pred):
         print(classification_report(y_true, y_pred))
@@ -106,32 +67,53 @@ class NEI:
     def confusion_matrix(self,y_true,y_pred):
        matrix = metrics.confusion_matrix(y_true,y_pred)
        normalized_matrix = matrix/np.sum(matrix, axis=1, keepdims=True)
-       _, ax = plt.subplots()
-       ax.tick_params(top=True)
+
+       # disp = metrics.ConfusionMatrixDisplay(confusion_matrix=normalized_matrix, display_labels=self.tagset)
+       fig, ax = plt.subplots()
+       # disp.plot(cmap=plt.cm.GnBu, ax=ax, colorbar=True)
+       ax.xaxis.set_ticks_position('top')
+       ax.xaxis.set_label_position('top')
        plt.xticks(np.arange(len(self.tagset)), self.tagset)
        plt.yticks(np.arange(len(self.tagset)), self.tagset)
        for i in range(normalized_matrix.shape[0]):
-           for j in range(normalized_matrix.shape[1]):
-               plt.text(j, i, format(normalized_matrix[i, j], '0.2f'), horizontalalignment="center")
+           for j in range(normalized_matrix.shape[1]):
+               text = f"{normalized_matrix[i, j]:.2f}"
+               ax.text(j, i, text, ha="center", va="center", color="black")
+       plt.title("Normalized Confusion Matrix")
+       plt.xlabel("Predicted Label")
+       plt.ylabel("True Label")
        plt.imshow(normalized_matrix,interpolation='nearest',cmap=plt.cm.GnBu)
        plt.colorbar()
        plt.savefig('Confusion_Matrix.png')
-
-    def vectorize(self, w, scaled_position):
-        title = 1 if w[0].isupper() else 0
-        allcaps = 1 if w.isupper() else 0
-        sw = 1 if w.lower() in SW else 0
-        punct = 1 if w in PUNCT else 0
-        return [title, allcaps, len(w), sw, punct, scaled_position]
+       # plt.xticks(np.arange(len(self.tagset)), self.tagset)
+       # plt.yticks(np.arange(len(self.tagset)), self.tagset)
+       # for i in range(normalized_matrix.shape[0]):
+       #     for j in range(normalized_matrix.shape[1]):
+       #         plt.text(j, i, format(normalized_matrix[i, j], '0.2f'), horizontalalignment="center")
+       # plt.imshow(normalized_matrix,interpolation='nearest',cmap=plt.cm.GnBu)
+       # plt.colorbar()
+       # plt.savefig('Confusion_Matrix.png')
+
+    def vectorize(self, w, scaled_position, prev_tag=0, next_tag=0, prev_token=None):
+        is_titlecase = 1 if w[0].isupper() else 0
+        is_allcaps = 1 if w.isupper() else 0
+        is_sw = 1 if w.lower() in SW else 0
+        is_punct = 1 if w in PUNCT else 0
+        is_surrounded_by_entities = 1 if (prev_tag > 0 and next_tag > 0) else 0
+        is_connector = 1 if (w.lower() in connectors) and (prev_tag > 0 and next_tag > 0) else 0
+        # is_start_of_sentence = 1 if (scaled_position == 0 or prev_token in [".", "!", "?"]) and w.lower() not in start_words else 0
+        # is_start_of_sentence = 1 if scaled_position == 0 else 0
+        return [is_titlecase, is_allcaps, len(w), is_sw, is_punct, is_connector, scaled_position]
 
     def create_data(self, data):
        words, features, labels = [], [], []
        for d in tqdm(data):
            tags = d["ner_tags"]
-
            tokens = d["tokens"]
            for i, token in enumerate(tokens):
-               x = self.vectorize(token, scaled_position=(i / len(tokens)))
+               prev_tag = tags[i - 1] if i > 0 else 0
+               next_tag = tags[i + 1] if i < len(tokens) - 1 else 0
+               x = self.vectorize(token, scaled_position=(i / len(tokens)), prev_tag=prev_tag, next_tag=next_tag, prev_token=tokens[i-1] if i > 0 else None)
                y = 1 if tags[i] > 0 else 0
                features.append(x)
                labels.append(y)
@@ -150,18 +132,31 @@ class NEI:
        X_val = self.scaler.transform(X_val)
        y_pred_val = self.model.predict(X_val)
        # print(classification_report(y_true=y_val, y_pred=y_pred_val))
+       print(metrics.confusion_matrix(y_val,y_pred_val))
        self.confusion_matrix(y_val,y_pred_val)
        self.performance(y_val,y_pred_val)
 
    def infer(self, sentence):
        tokens = word_tokenize(sentence)
-       features = [self.vectorize(token, i / len(tokens)) for i, token in enumerate(tokens)]
+       features = []
+
+       raw_features = [self.vectorize(token, i / len(tokens), prev_token=tokens[i-1] if i > 0 else None) for i, token in enumerate(tokens)]
+       raw_features = np.array(raw_features, dtype=np.float32)
+       scaled_features = self.scaler.transform(raw_features)
+       y_pred = self.model.predict(scaled_features)
+
+       for i, token in enumerate(tokens):
+           prev_tag = y_pred[i - 1] if i > 0 else 0
+           next_tag = y_pred[i + 1] if i < len(tokens) - 1 else 0
+
+           feature_with_context = self.vectorize(token, i / len(tokens), prev_tag, next_tag, prev_token=tokens[i-1] if i > 0 else None)
+           features.append(feature_with_context)
+
        features = np.array(features, dtype=np.float32)
        scaled_features = self.scaler.transform(features)
        y_pred = self.model.predict(scaled_features)
        return list(zip(tokens, y_pred))
 
-
 data = load_dataset("conll2003", trust_remote_code=True)
 nei_model = NEI()
 
@@ -173,7 +168,7 @@ nei_model.evaluate(data["validation"])
 
 def annotate(text):
     predictions = nei_model.infer(text)
-    annotated_output = " ".join([f"{word}_{int(label)} " for word, label in predictions])
+    annotated_output = " ".join([f"{word}_{int(label)} " for word, label in predictions])
     return annotated_output
 
 interface = gr.Interface(fn = annotate,
@@ -186,6 +181,6 @@ interface = gr.Interface(fn = annotate,
                     placeholder="Tagged sentence appears here...",
                     ),
                     title = "Named Entity Recognition",
-                    description = "CS626 Assignment 2 (Autumn 2024)",
+                    description = "CS626 Assignment 3 (Autumn 2024)",
                     theme=gr.themes.Soft())
 interface.launch()
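
A note on the reworked featurizer: `vectorize` now returns seven values (title case, all caps, token length, stopword, punctuation, connector, scaled position), so the untouched `Features_count = 6` constant no longer matches the vector width, and `is_surrounded_by_entities` is computed but never included in the returned list. Below is a minimal standalone sketch of the new contract, reusing the `SW`, `PUNCT`, and `connectors` definitions from the diff; the example tokens and tag values are hypothetical:

    # Standalone sketch of the commit's vectorize(); mirrors the returned feature order:
    # [is_titlecase, is_allcaps, len, is_stopword, is_punct, is_connector, scaled_position]
    import nltk

    nltk.download('stopwords')
    SW = set(nltk.corpus.stopwords.words("english"))
    PUNCT = set([".", ",", "!", "?", ":", ";", "-", "(", ")", "[", "]", "{", "}", "'", '"'])
    connectors = set(["of", "in", "and", "for", "to", "with", "at", "from"])

    def vectorize(w, scaled_position, prev_tag=0, next_tag=0):
        is_titlecase = 1 if w[0].isupper() else 0
        is_allcaps = 1 if w.isupper() else 0
        is_sw = 1 if w.lower() in SW else 0
        is_punct = 1 if w in PUNCT else 0
        # The connector flag fires only when both neighbours carry entity tags.
        is_connector = 1 if (w.lower() in connectors) and (prev_tag > 0 and next_tag > 0) else 0
        return [is_titlecase, is_allcaps, len(w), is_sw, is_punct, is_connector, scaled_position]

    # "of" between two entity-tagged neighbours ("Bank", "England") fires the connector flag:
    print(vectorize("of", 1 / 3, prev_tag=1, next_tag=1))  # [0, 0, 2, 1, 0, 1, 0.333...]
    print(vectorize("of", 1 / 3))                          # [0, 0, 2, 1, 0, 0, 0.333...]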
 
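The reworked `infer` is two-pass: pass one vectorizes with the context tags left at their defaults of 0 and predicts provisional labels; pass two re-vectorizes each token with its neighbours' provisional labels as `prev_tag`/`next_tag` and predicts again. This mirrors `create_data`, which feeds the gold neighbour tags during training. A compressed sketch of that control flow, reusing the `vectorize` sketch above, with a trivial stand-in classifier (the real app uses the trained `self.model` plus `self.scaler`, and `word_tokenize` instead of `str.split`):

    # Sketch of the two-pass inference; predict() is a hypothetical stand-in
    # that tags title-case tokens, NOT the trained model.
    def predict(feats):
        return [int(f[0] == 1) for f in feats]

    def infer(sentence):
        tokens = sentence.split()
        # Pass 1: no neighbour tags known yet, so prev_tag/next_tag default to 0.
        provisional = predict([vectorize(t, i / len(tokens)) for i, t in enumerate(tokens)])
        # Pass 2: re-vectorize with each neighbour's provisional tag, then re-predict.
        feats = []
        for i, t in enumerate(tokens):
            prev_tag = provisional[i - 1] if i > 0 else 0
            next_tag = provisional[i + 1] if i < len(tokens) - 1 else 0
            feats.append(vectorize(t, i / len(tokens), prev_tag, next_tag))
        return list(zip(tokens, predict(feats)))

    print(infer("The Bank of England raised rates"))

Both passes produce vectors of the same width, so the fitted scaler applies to each; in pass one the connector feature is simply 0 for every token.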
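For reference, `annotate` joins each token with its 0/1 tag via an underscore; since the f-string already ends in a space and `" ".join` adds another, tokens in the output are separated by two spaces. A hypothetical call (the tags shown are made up; real output depends on the trained model):

    out = annotate("Sundar Pichai joined Google")
    print(out)  # e.g. 'Sundar_1  Pichai_1  joined_0  Google_1 '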