# -*- coding: utf-8 -*-
"""Jan_16_In_Class_Assignment_ECE_UW,_PMP_course_LLM_2024.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1W2g1PyBwLNE_P_xlBg9C5BfFxiRDyDa2

# Embeddings and Semantic Search (LLM 2024)

## This in-class coding exercise is to get hands-on with embeddings and one of their most obvious applications: semantic search.

Search is an area that many companies have invested in heavily. Every retail company has a search engine of its own to serve its products, but how many of them include semantics in search? Search is typically implemented with tries, which are geared toward head queries; bringing semantics into the mix changes the ball game entirely, because semantic search can also address tail queries.

One of the bottlenecks in including semantics in search is latency: the more sophisticated the search, the slower the inference. This is why there is no one-stop solution for semantic search in a real-world scenario. Even though ChatGPT can return amazing results with the right prompting, we know the latency it would incur, making it less viable here :-)
"""
import numpy as np
import requests
import os
import pickle
import streamlit as st
from sentence_transformers import SentenceTransformer
class Embeddings:
    def __init__(self):
        """
        Initialize the class
        """
        self.glove_embeddings_dim = 50

    def download_glove_embeddings(self):
        """
        Download glove embeddings from web or from your gdrive if in optimized format
        """
        # Left as a stub here: the preprocessed files below are expected to
        # already be available locally.
        embeddings_temp = "embeddings_50d_temp.npy"
        word_index_temp = "word_index_dict_50d_temp.pkl"
    def load_glove_embeddings(self, embedding_dimension):
        """
        Load the preprocessed word index dictionary and embedding matrix from disk
        """
        word_index_temp = "word_index_dict_50d_temp.pkl"
        embeddings_temp = "embeddings_50d_temp.npy"

        # Load word index dictionary (word -> row in the embedding matrix)
        with open(word_index_temp, "rb") as f:
            word_index_dict = pickle.load(f, encoding="latin")

        # Load embeddings numpy array
        embeddings = np.load(embeddings_temp)

        return word_index_dict, embeddings
    def get_glove_embedding(self, word, word_index_dict, embeddings):
        """
        Retrieve GloVe embedding of a specific dimension
        """
        word = word.lower()
        if word in word_index_dict:
            return embeddings[word_index_dict[word]]
        else:
            # Out-of-vocabulary words map to the zero vector
            return np.zeros(self.glove_embeddings_dim)
    def embeddings_preprocess(self, word_index_dict, positive_words, negative_words, embeddings):
        """
        Combine word vectors arithmetically: subtract the embeddings of the
        negative words and add those of the positive words (word-analogy math)
        """
        new_embedding = np.zeros(self.glove_embeddings_dim)

        # for negative words
        for word in negative_words:
            new_embedding -= self.get_glove_embedding(word, word_index_dict, embeddings)

        # for positive words
        for word in positive_words:
            new_embedding += self.get_glove_embedding(word, word_index_dict, embeddings)

        return new_embedding
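    # Example (sketch, assuming the 50-d GloVe files above are present): the
    # classic analogy "king - man + woman ≈ queen" becomes
    #
    #   emb = Embeddings()
    #   word_index_dict, embeddings = emb.load_glove_embeddings("50d")
    #   analogy_vec = emb.embeddings_preprocess(
    #       word_index_dict,
    #       positive_words=["king", "woman"],
    #       negative_words=["man"],
    #       embeddings=embeddings,
    #   )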
    def get_sentence_transformer_embedding(self, sentence, transformer_name="all-MiniLM-L6-v2"):
        """
        Encode a sentence using sentence transformer and return embedding
        """
        # Note: the model is re-instantiated on every call; caching it (e.g. on
        # self, or with st.cache_resource) would avoid repeated loading.
        sentenceTransformer = SentenceTransformer(transformer_name)

        return sentenceTransformer.encode(sentence)
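    # Usage sketch: all-MiniLM-L6-v2 maps any input text to a 384-d vector.
    #
    #   emb = Embeddings()
    #   vec = emb.get_sentence_transformer_embedding("Roses are red")
    #   vec.shape  # -> (384,)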
    def get_averaged_glove_embeddings(self, sentence, embeddings_dict):
        """
        Average the GloVe embeddings of all in-vocabulary words in a sentence
        """
        words = sentence.split(" ")

        # Initialize an array of zeros for the embedding
        glove_embedding = np.zeros(embeddings_dict["embeddings"].shape[1])

        count_words = 0
        for word in words:
            word = word.lower()  # Convert to lowercase to match the embeddings dictionary
            if word in embeddings_dict["word_index"]:
                # Sum up embeddings for each word
                glove_embedding += embeddings_dict["embeddings"][embeddings_dict["word_index"][word]]
                count_words += 1

        if count_words > 0:
            # Average the embeddings
            glove_embedding /= count_words

        return glove_embedding
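    # Usage sketch: this method expects the two loaded GloVe structures bundled
    # into one dict (the "word_index"/"embeddings" keys are this demo's convention):
    #
    #   word_index_dict, embeddings = emb.load_glove_embeddings("50d")
    #   embeddings_dict = {"word_index": word_index_dict, "embeddings": embeddings}
    #   emb.get_averaged_glove_embeddings("roses are red", embeddings_dict)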
import numpy.linalg as la

class Search:
    def __init__(self, embeddings_model):
        self.embeddings_model = embeddings_model

    def cosine_similarity(self, x, y):
        """Cosine similarity, with the denominator floored at 1e-3 to avoid division by zero."""
        return np.dot(x, y) / max(la.norm(x) * la.norm(y), 1e-3)

    def get_topK_similar_categories(self, sentence, categories, top_k=10):
        """Return top K most similar categories to a given sentence."""
        sentence_embedding = self.embeddings_model.get_sentence_transformer_embedding(sentence)
        similarities = {
            category: self.cosine_similarity(sentence_embedding, category_embedding)
            for category, category_embedding in categories.items()
        }
        return dict(sorted(similarities.items(), key=lambda item: item[1], reverse=True)[:top_k])
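    # Worked example: parallel vectors score 1.0 and orthogonal vectors 0.0,
    # since cos(0°) = 1 and cos(90°) = 0.
    #
    #   search = Search(Embeddings())
    #   search.cosine_similarity(np.array([1.0, 0.0]), np.array([2.0, 0.0]))  # 1.0
    #   search.cosine_similarity(np.array([1.0, 0.0]), np.array([0.0, 3.0]))  # 0.0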
    def normalize_func(self, vector):
        """Normalize a vector."""
        norm = np.linalg.norm(vector)
        return vector / norm if norm != 0 else vector
    def find_closest_words(self, current_embedding, answer_list, word_index_dict, embeddings):
        """Find closest word from answer_list to current_embedding."""
        # -50 is a safe sentinel: cosine similarity always lies in [-1, 1]
        highest_similarity, closest_answer = -50, None
        for choice in answer_list:
            choice_embedding = self.embeddings_model.get_glove_embedding(choice, word_index_dict, embeddings)
            similarity = self.cosine_similarity(current_embedding, choice_embedding)
            if similarity > highest_similarity:
                highest_similarity, closest_answer = similarity, choice
        return closest_answer
    def find_word_as(self, current_relation, target_word, answer_list, word_index_dict, embeddings):
        """Find a word analogous to target_word based on current_relation (a pair (a, b))."""
        base_vector_a = self.embeddings_model.get_glove_embedding(current_relation[0], word_index_dict, embeddings)
        base_vector_b = self.embeddings_model.get_glove_embedding(current_relation[1], word_index_dict, embeddings)
        target_vector = self.embeddings_model.get_glove_embedding(target_word, word_index_dict, embeddings)

        # Direction of the reference relation a -> b
        ref_difference = self.normalize_func(base_vector_b - base_vector_a)

        answer, highest_similarity = None, -50
        for choice in answer_list:
            choice_vector = self.embeddings_model.get_glove_embedding(choice, word_index_dict, embeddings)
            # Direction from the target word to this candidate
            choice_difference = self.normalize_func(choice_vector - target_vector)
            similarity = self.cosine_similarity(ref_difference, choice_difference)
            if similarity > highest_similarity:
                highest_similarity, answer = similarity, choice
        return answer
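    # Usage sketch (GloVe files loaded as above): "man is to king as woman is to ?"
    #
    #   search = Search(Embeddings())
    #   search.find_word_as(("man", "king"), "woman",
    #                       ["queen", "princess", "duke"],
    #                       word_index_dict, embeddings)
    #   # -> typically "queen" with 50-d GloVe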
    def find_similarity_scores(self, current_embedding, choices, word_index_dict, embeddings):
        """Calculate similarity scores between current_embedding and choices."""
        similarity_scores = {}
        for choice in choices:
            choice_embedding = self.embeddings_model.get_glove_embedding(choice, word_index_dict, embeddings)
            similarity_scores[choice] = self.cosine_similarity(current_embedding, choice_embedding)
        return similarity_scores

import matplotlib.pyplot as plt


def plot_pie_chart(category_similarity_scores):
    """Plot a pie chart of similarity scores, normalized so the slices sum to 1."""
    categories = list(category_similarity_scores.keys())
    cur_similarities = list(category_similarity_scores.values())
    similarities = [similar / sum(cur_similarities) for similar in cur_similarities]

    fig, ax = plt.subplots()
    ax.pie(similarities, labels=categories, autopct="%1.1f%%", startangle=90)
    ax.axis("equal")  # Equal aspect ratio so the pie is drawn as a circle
    plt.show()

def plot_piechart_helper(sorted_cosine_scores_items):
    """Build a small pie-chart figure, exploding the top-scoring slices for emphasis."""
    sorted_cosine_scores = np.array(list(sorted_cosine_scores_items.values()))
    categories_sorted = list(sorted_cosine_scores_items.keys())

    fig, ax = plt.subplots(figsize=(3, 3))

    my_explode = np.zeros(len(categories_sorted))
    my_explode[0] = 0.2
    if len(categories_sorted) == 3:
        my_explode[1] = 0.1
    elif len(categories_sorted) > 3:
        my_explode[2] = 0.05

    ax.pie(
        sorted_cosine_scores,
        labels=categories_sorted,
        autopct="%1.1f%%",
        explode=my_explode,
    )
    return fig

def plot_alatirchart(sorted_cosine_scores_models):
    """Render one Streamlit tab per model, each containing its pie chart."""
    models = list(sorted_cosine_scores_models.keys())
    tabs = st.tabs(models)
    figs = {}
    for model in models:
        figs[model] = plot_piechart_helper(sorted_cosine_scores_models[model])
    for index in range(len(tabs)):
        with tabs[index]:
            st.pyplot(figs[models[index]])

### Text Search ###
st.sidebar.title("Sentence Transformer")

if "categories" not in st.session_state:
    st.session_state["categories"] = "Flowers Colors Cars Weather Food"
if "text_search" not in st.session_state:
    st.session_state["text_search"] = "Roses are red, trucks are blue, and Seattle is grey right now"

embeddings_model = Embeddings()

model_type = "50d"
st.sidebar.write("Model Type: 50d")

st.title("In-Class Coding Practice 1 Demo")
st.subheader(
    "Pass in space-separated categories you want this search demo to be about."
)
# Categories from user input
user_categories = st.text_input(
    label="Categories", value=st.session_state.categories
)
# Keep the raw string in session state (so the text_input default stays a string
# across reruns) and work with the split list locally.
st.session_state.categories = user_categories
category_list = user_categories.split(" ")
print(category_list)
print(type(category_list))

st.subheader("Pass in an input word or even a sentence")
user_text_search = st.text_input(
    label="Input your sentence",
    value=st.session_state.text_search,
)
st.session_state.text_search = user_text_search

# Load glove embeddings
word_index_dict, embeddings = embeddings_model.load_glove_embeddings(model_type)

# Pre-compute a sentence-transformer embedding for each category
category_embeddings = {
    category: embeddings_model.get_sentence_transformer_embedding(category)
    for category in category_list
}

search_using_cos = Search(embeddings_model)

# Find the closest category to the input sentence
if st.session_state.text_search:
    # Sentence transformer embeddings
    print("Sentence Transformer Embedding")
    embeddings_metadata = {
        "word_index_dict": word_index_dict,
        "embeddings": embeddings,
        "model_type": model_type,
        "text_search": st.session_state.text_search,
    }

    with st.spinner("Obtaining cosine similarity for the sentence transformer..."):
        sorted_cosine_sim_transformer = search_using_cos.get_topK_similar_categories(
            st.session_state.text_search, category_embeddings
        )

    # Results and pie chart for the sentence transformer
    print("Categories are: ", category_list)
    st.subheader(
        "Closest category among: "
        + " ".join(category_list)
        + " as per different embeddings"
    )

    # print(sorted_cosine_sim_glove)
    print(sorted_cosine_sim_transformer)
    print(list(sorted_cosine_sim_transformer.keys())[0])

    st.write(
        f"Closest category using sentence transformer embeddings: {list(sorted_cosine_sim_transformer.keys())[0]}"
    )

    plot_alatirchart(
        {
            "sentence_transformer_384": sorted_cosine_sim_transformer,
        }
    )
st.write("")
st.write(
"Demo developed by Edward Xu"
)