EdwardXu committed on
Commit
5bb13ab
1 Parent(s): a465a60

Upload 4 files

embeddings_50d_temp.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e74f88cde3ff2e36c815d13955c67983cf6f81829d2582cb6789c10786e5ef66
+ size 477405680
jan_16_in_class_assignment_ece_uw,_pmp_course_llm_2024.py ADDED
@@ -0,0 +1,275 @@
+ # -*- coding: utf-8 -*-
+ """Jan_16_In_Class_Assignment_ECE_UW,_PMP_course_LLM_2024.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+ https://colab.research.google.com/drive/1W2g1PyBwLNE_P_xlBg9C5BfFxiRDyDa2
+
+ # Embeddings and Semantic Search (LLM 2024)
+
+ ## This in-class coding exercise is to get hands-on with embeddings and one of their most obvious applications: semantic search.
+
+ Search is an area that many companies have invested in: any retail company has a search engine of its own to serve its products. But how many of them include semantics in search? Search is typically done through tries, and when we bring semantics into it, the ball game changes entirely. Searching with semantics can help address tail queries, whereas trie-based search is usually geared toward head queries.
+ One of the bottlenecks in bringing semantics to search is latency: the more sophisticated the search, the slower its inference will be. This is why there is no one-stop solution for semantic search in a real-world scenario. Even though ChatGPT can return amazing results with the right prompting, we know the latency it would incur, which makes it less viable here :-)
+ """
+
+ from google.colab import drive
+ drive.mount('/content/drive')
+
+ """## Install dependencies"""
+
+ !pip3 install sentence-transformers
+ !pip install datasets
+ !pip install -q streamlit
+
+ """## 1. Embeddings
+
+ Work on developing an embeddings class that goes from the simple GloVe embeddings to the more intricate sentence-transformer embeddings.
+ """
+
+ """
+ In this code block, you can develop a class for Embeddings -
+ one that can fetch embeddings of different kinds for the purpose of "Semantic Search".
+ """
+ import numpy as np
+ import requests
+ import os
+ import pickle
+
+ from sentence_transformers import SentenceTransformer
+
+ class Embeddings:
+
+     def __init__(self):
+         """
+         Initialize the class
+         """
+         self.glove_embeddings_dim = 50
+
+     def download_glove_embeddings(self):
+         """
+         Download GloVe embeddings from the web, or from your Google Drive if
+         they are already stored there in an optimized format.
+         (Left as a stub: the paths below assume the files are already on Drive.)
+         """
+         embeddings_temp = "/content/drive/MyDrive/EE596LLM/HW2/embeddings_50d_temp.npy"
+         word_index_temp = "/content/drive/MyDrive/EE596LLM/HW2/word_index_dict_50d_temp.pkl"
+
+     def load_glove_embeddings(self, embedding_dimension):
+         """
+         Load the pre-saved GloVe word index and embedding matrix from Drive.
+         (Only the 50-d files are wired up; embedding_dimension is currently unused.)
+         """
+         word_index_temp = "/content/drive/MyDrive/EE596LLM/HW2/word_index_dict_50d_temp.pkl"
+         embeddings_temp = "/content/drive/MyDrive/EE596LLM/HW2/embeddings_50d_temp.npy"
+
+         # Load word index dictionary (word -> row index in the embedding matrix)
+         word_index_dict = pickle.load(open(word_index_temp, "rb"), encoding="latin")
+
+         # Load embeddings numpy array
+         embeddings = np.load(embeddings_temp)
+
+         return word_index_dict, embeddings
+
+     def get_glove_embedding(self, word, word_index_dict, embeddings):
+         """
+         Retrieve the GloVe embedding of a specific word
+         """
+         word = word.lower()
+         if word in word_index_dict:
+             return embeddings[word_index_dict[word]]
+         else:
+             # Out-of-vocabulary words map to the zero vector
+             return np.zeros(self.glove_embeddings_dim)
+
+     def embeddings_preprocess(self, word_index_dict, positive_words, negative_words, embeddings):
+         """
+         Combine word vectors additively, e.g. king - man + woman.
+         """
+         new_embedding = np.zeros(self.glove_embeddings_dim)
+
+         # Subtract the embeddings of the negative words
+         for word in negative_words:
+             new_embedding -= self.get_glove_embedding(word, word_index_dict, embeddings)
+
+         # Add the embeddings of the positive words
+         for word in positive_words:
+             new_embedding += self.get_glove_embedding(word, word_index_dict, embeddings)
+
+         return new_embedding
+
+     def get_sentence_transformer_embedding(self, sentence, transformer_name="all-MiniLM-L6-v2"):
+         """
+         Encode a sentence using a sentence transformer and return the embedding
+         """
+         sentenceTransformer = SentenceTransformer(transformer_name)
+         return sentenceTransformer.encode(sentence)
+
+     def get_averaged_glove_embeddings(self, sentence, embeddings_dict):
+         """
+         Average the GloVe embeddings of all in-vocabulary words in a sentence
+         """
+         words = sentence.split(" ")
+         # Initialize an array of zeros for the embedding
+         glove_embedding = np.zeros(embeddings_dict['embeddings'].shape[1])
+
+         count_words = 0
+         for word in words:
+             word = word.lower()  # Convert to lowercase to match the embeddings dictionary
+             if word in embeddings_dict['word_index']:
+                 # Sum up embeddings for each word
+                 glove_embedding += embeddings_dict['embeddings'][embeddings_dict['word_index'][word]]
+                 count_words += 1
+
+         if count_words > 0:
+             # Average the embeddings
+             glove_embedding /= count_words
+
+         return glove_embedding
+
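+ """A quick sanity check of the two embedding types exposed by the class (an added
+ sketch, not part of the original assignment; variable names are illustrative). The
+ expected shapes follow from the 50-d GloVe files above and the 384-d
+ all-MiniLM-L6-v2 default.
+ """
+
+ emb_check = Embeddings()
+ wi_check, ge_check = emb_check.load_glove_embeddings(50)
+ print(emb_check.get_glove_embedding("king", wi_check, ge_check).shape)  # (50,)
+ print(emb_check.get_sentence_transformer_embedding("a short sentence").shape)  # (384,)
+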
+ """## 2. Search Class
+
+ Implement a class with all the methods needed for search, including cosine similarity.
+ """
+
+ import numpy.linalg as la
+ import numpy as np
+
+ class Search:
+
+     def __init__(self, embeddings_model):
+         self.embeddings_model = embeddings_model
+
+     def cosine_similarity(self, x, y):
+         # dot(x, y) / (||x|| * ||y||); the denominator is floored at 1e-3 so that
+         # all-zero (out-of-vocabulary) vectors do not cause division by zero
+         return np.dot(x, y) / max(la.norm(x) * la.norm(y), 1e-3)
+
+     def get_topK_similar_categories(self, sentence, categories, top_k=10):
+         """Return the top K categories most similar to a given sentence."""
+         sentence_embedding = self.embeddings_model.get_sentence_transformer_embedding(sentence)
+         similarities = {category: self.cosine_similarity(sentence_embedding, category_embedding)
+                         for category, category_embedding in categories.items()}
+         return dict(sorted(similarities.items(), key=lambda item: item[1], reverse=True)[:top_k])
+
+     def normalize_func(self, vector):
+         """Normalize a vector (zero vectors are returned unchanged)."""
+         norm = np.linalg.norm(vector)
+         return vector / norm if norm != 0 else vector
+
+     def find_closest_words(self, current_embedding, answer_list, word_index_dict, embeddings):
+         """Find the word in answer_list closest to current_embedding."""
+         highest_similarity, closest_answer = -50, None
+         for choice in answer_list:
+             choice_embedding = self.embeddings_model.get_glove_embedding(choice, word_index_dict, embeddings)
+             similarity = self.cosine_similarity(current_embedding, choice_embedding)
+             if similarity > highest_similarity:
+                 highest_similarity, closest_answer = similarity, choice
+         return closest_answer
+
+     def find_word_as(self, current_relation, target_word, answer_list, word_index_dict, embeddings):
+         """Find a word analogous to target_word based on current_relation:
+         pick the choice whose offset from target_word best matches the offset b - a."""
+         base_vector_a = self.embeddings_model.get_glove_embedding(current_relation[0], word_index_dict, embeddings)
+         base_vector_b = self.embeddings_model.get_glove_embedding(current_relation[1], word_index_dict, embeddings)
+         target_vector = self.embeddings_model.get_glove_embedding(target_word, word_index_dict, embeddings)
+         ref_difference = self.normalize_func(base_vector_b - base_vector_a)
+         answer, highest_similarity = None, -50
+         for choice in answer_list:
+             choice_vector = self.embeddings_model.get_glove_embedding(choice, word_index_dict, embeddings)
+             choice_difference = self.normalize_func(choice_vector - target_vector)
+             similarity = self.cosine_similarity(ref_difference, choice_difference)
+             if similarity > highest_similarity:
+                 highest_similarity, answer = similarity, choice
+         return answer
+
+     def find_similarity_scores(self, current_embedding, choices, word_index_dict, embeddings):
+         """Calculate similarity scores between current_embedding and each choice."""
+         similarity_scores = {}
+         for choice in choices:
+             choice_embedding = self.embeddings_model.get_glove_embedding(choice, word_index_dict, embeddings)
+             similarity_scores[choice] = self.cosine_similarity(current_embedding, choice_embedding)
+         return similarity_scores
+
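+ """A tiny numeric check of cosine_similarity (an added sketch, not part of the
+ original assignment): parallel vectors score 1.0, orthogonal vectors score 0.0.
+ """
+
+ _s = Search(Embeddings())
+ print(_s.cosine_similarity(np.array([1.0, 0.0]), np.array([2.0, 0.0])))  # 1.0
+ print(_s.cosine_similarity(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 0.0
+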
+ """## 3. Word Arithmetic
+
+ Let's test your embeddings. Answer the questions below through the search functionality you implemented above.
+ """
+
+ embeddings_model = Embeddings()
+ search_using_cos = Search(embeddings_model)
+
+ word_index_dict, embeddings = embeddings_model.load_glove_embeddings(50)
+
+ # "king" - "man" + "woman"
+ current_embedding = embeddings_model.embeddings_preprocess(word_index_dict, ["king", "woman"], ["man"], embeddings)
+ closest_word = search_using_cos.find_closest_words(current_embedding, ["girl", "queen", "princess", "daughter", "mother"], word_index_dict, embeddings)
+ print("'King - Man + Woman':", closest_word)
+
+ # "tesla" : "car" :: "apple" : ?
+ closest_word = search_using_cos.find_word_as(("tesla", "car"), "apple", ["fruit", "vegetable", "gas"], word_index_dict, embeddings)
+ print("'Tesla:Car as Apple:?':", closest_word)
+
+ """## 4. Plots
+
+ Plot the search results as a pie chart, with the percentages reflecting how likely each category is to be related to the search input.
+ """
+
+ import matplotlib.pyplot as plt
+
+ def plot_pie_chart(category_similarity_scores):
+     """Plot a pie chart of category similarity scores."""
+     categories = list(category_similarity_scores.keys())
+     similarities = list(category_similarity_scores.values())
+     # Normalize the scores so the slices sum to 100%
+     normalized_similarities = [sim / sum(similarities) for sim in similarities]
+
+     fig, ax = plt.subplots()
+     ax.pie(normalized_similarities, labels=categories, autopct="%1.1f%%", startangle=90)
+     ax.axis('equal')  # Equal aspect ratio ensures the pie chart is circular.
+     plt.show()
+
+ word_index_dict, embeddings = embeddings_model.load_glove_embeddings(50)
+
+ # Find the word closest to the vector resulting from "king" - "man" + "woman"
+ current_embedding = embeddings_model.embeddings_preprocess(word_index_dict, ["king", "woman"], ["man"], embeddings)
+
+ # Calculate similarity scores for a set of words and plot them
+ sim_scores = search_using_cos.find_similarity_scores(current_embedding, ["girl", "queen", "princess", "daughter", "mother"], word_index_dict, embeddings)
+ plot_pie_chart(sim_scores)
+
+ """## 5. Test
+
+ Test your pie chart against some of the examples in the demos listed here:
+
+ https://categorysearch.streamlit.app or
+ https://searchdemo.streamlit.app
+
+ a) Do the results make sense?
+ b) Which embedding gives more meaningful results?
+ """
+
+ input_sentence = "Roses are red, trucks are blue, and Seattle is grey right now"
+ category_names = ["Flowers", "Colors", "Cars", "Weather", "Food"]
+
+ embeddings_model = Embeddings()
+ word_index_dict, embeddings = embeddings_model.load_glove_embeddings(50)
+
+ # Sentence-transformer embedding for each category
+ categories_embedding = {category: embeddings_model.get_sentence_transformer_embedding(category) for category in category_names}
+
+ search_instance = Search(embeddings_model)
+ category_similarity_scores = search_instance.get_topK_similar_categories(input_sentence, categories_embedding)
+
+ plot_pie_chart(category_similarity_scores)  # Plot and see
+
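+ """To answer question (b), it helps to score the same query with both embedding
+ types. The block below is an added illustration (not part of the original
+ assignment): it scores the categories with averaged 50-d GloVe vectors so they can
+ be compared against the sentence-transformer pie chart above.
+ """
+
+ # Averaged 50-d GloVe embedding for the sentence and for each category
+ embeddings_dict = {"word_index": word_index_dict, "embeddings": embeddings}
+ glove_sentence = embeddings_model.get_averaged_glove_embeddings(input_sentence, embeddings_dict)
+ glove_scores = {
+     category: search_instance.cosine_similarity(
+         glove_sentence,
+         embeddings_model.get_averaged_glove_embeddings(category, embeddings_dict))
+     for category in category_names
+ }
+ plot_pie_chart(glove_scores)  # Compare against the sentence-transformer chart
+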
+ """## 6. Bonus (if time permits)!
+
+ Create a simple Streamlit (or equivalent) web app like the ones linked in section 5.
+ This is also part of your Mini-Project 1!
+ """
+
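+ """A minimal sketch of what such an app could look like (an added illustration, not
+ a prescribed solution: the widget labels, the file name app.py, and the layout are
+ assumptions). It reuses the Embeddings and Search classes above and would be saved
+ to its own file and launched with `streamlit run app.py`.
+ """
+
+ # import streamlit as st
+ #
+ # st.title("Semantic Category Search")
+ # sentence = st.text_input("Enter a sentence", "Roses are red, trucks are blue")
+ # categories_text = st.text_input("Categories (comma-separated)", "Flowers,Colors,Cars,Weather,Food")
+ #
+ # if st.button("Search"):
+ #     model = Embeddings()
+ #     searcher = Search(model)
+ #     category_embeddings = {c.strip(): model.get_sentence_transformer_embedding(c.strip())
+ #                            for c in categories_text.split(",")}
+ #     scores = searcher.get_topK_similar_categories(sentence, category_embeddings)
+ #     st.bar_chart(scores)  # scores: {category: cosine similarity}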
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ streamlit
+ numpy
+ pickleshare
+ gdown
+ sentence-transformers
+ matplotlib
word_index_dict_50d_temp.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:674af352f703098ef122f6a8db7c5e08c5081829d49daea32e5aeac1fe582900
+ size 60284151