# %% env
import os

# os.environ["OPENAI_API_KEY"] = ''
# %% use env
from dotenv import load_dotenv

# Load OPENAI_API_KEY (and any other settings) from a local .env file into
# the process environment so the OpenAI clients below can find the key.
load_dotenv()
# %% import embeddings
# ImportError: Could not import tiktoken python package.
# This is needed in order to for OpenAIEmbeddings. Please install it with `pip install tiktoken`.
from langchain_community.embeddings import OpenAIEmbeddings

# %% Initialize the OpenAIEmbeddings object
embeddings = OpenAIEmbeddings()

# %% data
import pandas as pd

# ImportError: Missing optional dependency 'openpyxl'.  Use pip or conda to install openpyxl.
# The spreadsheet is expected to have a 'Words' column (used below),
# one word/phrase per row.
df = pd.read_excel('data.xlsx')
print(df)

# %% get_embedding
'''
Once we have a vector representing a word, we can determine how similar it is
to the other words in our dataframe by computing the cosine similarity of our
search term's vector against each word embedding in the dataframe.
'''
from openai import OpenAI

# Reads OPENAI_API_KEY from the environment populated by load_dotenv() above.
client = OpenAI()


def get_embedding(text, model="text-embedding-ada-002"):
    """Return the embedding vector for *text* using the given OpenAI model.

    Newlines are collapsed to spaces before the API call, since the
    embedding endpoint works best on single-line input.
    """
    cleaned = text.replace("\n", " ")
    response = client.embeddings.create(input=[cleaned], model=model)
    return response.data[0].embedding


# %%
'''
Because our words are stored in a pandas dataframe, we can use "apply" to run
the get_embedding function on each row. To save time (and API charges), the
calculated word embeddings are then saved to a new csv file called
"word_embeddings.csv" rather than calling OpenAI again to redo these
computations.
'''
# NOTE: this calls the embeddings API once per row, so it can take a long time.
df['embedding'] = df['Words'].apply(lambda x: embeddings.embed_query(x))
# df['embedding'] = df['Words'].apply(lambda x: get_embedding(x))
# Cache results to disk. Beware: the list-valued 'embedding' column is
# serialized as a plain string in the CSV (see the read-back note below).
df.to_csv('word_embeddings.csv')

# %%
'''
Let's load the existing file, which contains the embeddings,
so that we can save charges by not hitting the API repeatedly.
'''
# NOTE(review): pd.read_csv returns the 'embedding' column as *strings*,
# not lists of floats — you would need ast.literal_eval before doing math
# on it. The similarity code below keeps using the in-memory `df`, where
# the column still holds real lists, so `new_df` is only printed here.
new_df = pd.read_csv('word_embeddings.csv')
print(new_df)

# %% Let's get the embeddings for our text
our_txt = 'Mango'
# Embed the search term with the same model so the vectors are comparable.
txt_embedding = embeddings.embed_query(our_txt)
print(f'Our embedding is {txt_embedding}')

# %% utils
import numpy as np


def calculate_cosine_similarity(vector_a, vector_b):
    """Return the cosine similarity between two vectors.

    cos(theta) = (a . b) / (|a| * |b|). A tiny epsilon is added to the
    denominator for numerical stability, so a zero vector yields ~0.0
    instead of raising a division-by-zero error.
    """
    a = np.asarray(vector_a)
    b = np.asarray(vector_b)
    denominator = np.linalg.norm(a) * np.linalg.norm(b) + 1e-10
    return np.dot(a, b) / denominator


# %% similarity
# old version < 1.0
# from openai.embeddings_utils import cosine_similarity
# from sklearn.metrics.pairwise import cosine_similarity

# Score every word in the in-memory dataframe against the search term.
# This works because df['embedding'] still holds Python lists here; the
# CSV round-trip above would have turned them into strings.
df['similarity score'] = df['embedding'].apply(lambda x: calculate_cosine_similarity(x, txt_embedding))
# df['similarity score'] = df['embedding'].apply(lambda x: cosine_similarity(x, txt_embedding))
print(df)

# %%
'''
Sorting the dataframe by similarity score reveals that Banana, Orange, and
Apple are the words closest to the searched term, Mango.
'''

# Show the 10 most similar words, best match first.
res = df.sort_values('similarity score', ascending=False).head(10)
print(res)
