# Jarvis_QuAn_v02 / app.py
import os
import numpy as np
import openai
import pandas as pd
import tiktoken
import gradio as gr
COMPLETIONS_MODEL = "text-davinci-003"
EMBEDDING_MODEL = "text-embedding-ada-002"
openai.api_key = os.getenv("OPENAI_API_KEY")
# 1) Preprocess the document library
df = pd.read_csv("informacion_neo_tokenizado.csv")
df = df.set_index(["title", "heading"])
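# The CSV is expected to provide "title", "heading", "content" and "tokens"
# columns (inferred from how the dataframe is indexed and used below).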
def get_embedding(text: str, model: str = EMBEDDING_MODEL) -> list[float]:
    result = openai.Embedding.create(
        model=model,
        input=text
    )
    return result["data"][0]["embedding"]
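# A minimal usage sketch (the text is illustrative, not from the dataset):
# text-embedding-ada-002 returns a 1536-dimensional, unit-length vector.
#
#   example_vector = get_embedding("¿Cuál es el horario de atención?")
#   assert len(example_vector) == 1536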
# Calculate embeddings for every document section from scratch (one API call per row). ========
def compute_doc_embeddings(df: pd.DataFrame) -> dict[tuple[str, str], list[float]]:
    return {
        idx: get_embedding(r.content) for idx, r in df.iterrows()
    }
document_embeddings = compute_doc_embeddings(df)
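# Optional: a minimal caching sketch so embeddings are not recomputed on every
# launch. The file name "document_embeddings.pkl" is an assumption, not part of
# the original app; uncomment to use instead of the line above.
#
# import pickle
# EMBEDDINGS_CACHE = "document_embeddings.pkl"
# if os.path.exists(EMBEDDINGS_CACHE):
#     with open(EMBEDDINGS_CACHE, "rb") as f:
#         document_embeddings = pickle.load(f)
# else:
#     document_embeddings = compute_doc_embeddings(df)
#     with open(EMBEDDINGS_CACHE, "wb") as f:
#         pickle.dump(document_embeddings, f)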
# 2) Find the most similar document embeddings to the question embedding
def vector_similarity(x: list[float], y: list[float]) -> float:
    """
    Returns the similarity between two vectors.

    Because OpenAI embeddings are normalized to length 1, the cosine similarity is the same as the dot product.
    """
    return np.dot(np.array(x), np.array(y))
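# A quick sanity check of the dot-product claim (illustrative values, not real
# embeddings): identical unit vectors score 1.0, orthogonal ones score 0.0.
#
#   vector_similarity([1.0, 0.0], [1.0, 0.0])  # -> 1.0
#   vector_similarity([1.0, 0.0], [0.0, 1.0])  # -> 0.0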
def order_document_sections_by_query_similarity(query: str, contexts: dict[tuple[str, str], np.ndarray]) -> list[tuple[float, tuple[str, str]]]:
    """
    Find the query embedding for the supplied query, and compare it against all of the pre-calculated document embeddings
    to find the most relevant sections.

    Return the list of document sections, sorted by relevance in descending order.
    """
    query_embedding = get_embedding(query)

    document_similarities = sorted([
        (vector_similarity(query_embedding, doc_embedding), doc_index) for doc_index, doc_embedding in contexts.items()
    ], reverse=True)

    return document_similarities
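# The result is a list of (similarity, (title, heading)) pairs, best match
# first, e.g. (the scores and section names below are hypothetical):
#
#   [(0.84, ("Planes", "Precios")), (0.79, ("Soporte", "Horarios")), ...]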
# 3) Add the most relevant document sections to the query prompt
MAX_SECTION_LEN = 500
SEPARATOR = "\n* "
ENCODING = "gpt2" # encoding for text-davinci-003
encoding = tiktoken.get_encoding(ENCODING)
separator_len = len(encoding.encode(SEPARATOR))
def construct_prompt(question: str, context_embeddings: dict, df: pd.DataFrame) -> str:
    """
    Fetch the most relevant document sections and prepend them to the question as context.
    """
    most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)

    chosen_sections = []
    chosen_sections_len = 0
    chosen_sections_indexes = []

    for _, section_index in most_relevant_document_sections:
        # Add contexts until we run out of space.
        document_section = df.loc[section_index]

        chosen_sections_len += document_section.tokens + separator_len
        if chosen_sections_len > MAX_SECTION_LEN:
            break

        chosen_sections.append(SEPARATOR + document_section.content.replace("\n", " "))
        chosen_sections_indexes.append(str(section_index))

    header = """Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""

    return header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:"
# Example prompt construction (a sanity check; the resulting string is not used by the app below).
prompt = construct_prompt(
    "Who won the 2020 Summer Olympics men's high jump?",
    document_embeddings,
    df
)
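# Optional (not part of the original flow): uncomment to inspect the
# constructed prompt during development.
#
#   print("===\n", prompt)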
# 4) Answer the user's question based on the context.
COMPLETIONS_API_PARAMS = {
    # We use temperature of 0.0 because it gives the most predictable, factual answer.
    "temperature": 0.0,
    "max_tokens": 300,
    "model": COMPLETIONS_MODEL,
}
def answer_query_with_context(
    query: str,
    df: pd.DataFrame,
    document_embeddings: dict[tuple[str, str], np.ndarray]
) -> str:
    prompt = construct_prompt(
        query,
        document_embeddings,
        df
    )

    response = openai.Completion.create(
        prompt=prompt,
        **COMPLETIONS_API_PARAMS
    )

    return response["choices"][0]["text"].strip(" \n")
def answer_question(query):
    return answer_query_with_context(query, df, document_embeddings)
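# A quick manual test outside Gradio (the question is hypothetical and should
# be phrased in the dataset's language):
#
#   print(answer_question("¿Qué servicios ofrece Neo?"))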
iface = gr.Interface(fn=answer_question, inputs="text", outputs="text")
iface.launch()