# AskQ / app.py
from __future__ import annotations

import os

import gradio as gr
import numpy as np
import openai
import pandas as pd
import tiktoken
from datasets import load_dataset

openai.api_key = os.getenv("OPENAI_API_KEY")

COMPLETIONS_MODEL = "text-davinci-003"
EMBEDDING_MODEL = "text-embedding-ada-002"

def openai_chat(prompt):
    """Send a prompt to the completions model and return the stripped answer text."""
    completions = openai.Completion.create(
        engine=COMPLETIONS_MODEL,
        prompt=prompt,
        max_tokens=1024,
        n=1,
        temperature=0,  # deterministic answers; raise (e.g. 0.5) for more varied output
    )
    message = completions.choices[0].text
    return message.strip()
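
# Illustrative usage (assumes OPENAI_API_KEY is set; the question is invented):
#   openai_chat("What does Qpulse track?")  # -> a short free-text answer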

# Get data
# The original cookbook example loaded the Olympics sections:
# df = pd.read_csv('https://cdn.openai.com/API/examples/data/olympics_sections_text.csv')
# df = df.set_index(["title", "heading"])

def get_embedding(text: str, model: str = EMBEDDING_MODEL) -> list[float]:
    """Embed a single piece of text with the OpenAI Embeddings API."""
    result = openai.Embedding.create(
        model=model,
        input=text,
    )
    return result["data"][0]["embedding"]
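
# Sketch of what get_embedding returns: text-embedding-ada-002 produces
# 1536-dimensional unit-length vectors, e.g.
#   vec = get_embedding("ionising radiation")  # example text, not from the data
#   len(vec)  # -> 1536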

def compute_doc_embeddings(df: pd.DataFrame) -> dict[tuple[str, str], list[float]]:
    """
    Create an embedding for each row in the dataframe using the OpenAI Embeddings API.
    Return a dictionary mapping each row's index to its embedding vector.
    """
    return {
        idx: get_embedding(r.content) for idx, r in df.iterrows()
    }

def load_embeddings(fname: str) -> dict[tuple[str, str], list[float]]:
    """
    Read the document embeddings and their keys from a CSV.

    fname is the path to a CSV with exactly these named columns:
        "title", "heading", "0", "1", ... up to the length of the embedding vectors.
    """
    df = pd.read_csv(fname, header=0)
    max_dim = max(int(c) for c in df.columns if c not in ("title", "heading"))
    return {
        (r.title, r.heading): [r[str(i)] for i in range(max_dim + 1)]
        for _, r in df.iterrows()
    }
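
# Illustrative CSV layout expected by load_embeddings (values made up):
#   title,heading,0,1,...,1535
#   Radiation,Overview,0.0123,-0.0045,...,0.0071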

# Other ways to obtain the embeddings:
# - calculate on the fly:
#   document_embeddings = compute_doc_embeddings(df)
# - the cookbook's precomputed Olympics file:
#   document_embeddings = load_embeddings("https://cdn.openai.com/API/examples/data/olympics_sections_document_embeddings.csv")
# - pull from the Hugging Face Hub:
#   data_files = {"RadiationWikiEmbedding.csv"}
#   dataset = load_dataset("mcbrs1/RadiationWiki", data_files=data_files)
document_embeddings = load_embeddings("RadiationWikiEmbedding.csv")
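
# NOTE: construct_prompt() and chatbot() below also need `df`, a frame holding
# each section's text, indexed by ("title", "heading") with "content" and
# "tokens" columns. The original file never defines it; the filename below is
# an assumption, made only so the app can run (a content CSV matching the
# embeddings CSV).
df = pd.read_csv("RadiationWiki.csv", header=0).set_index(["title", "heading"])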

def vector_similarity(x: list[float], y: list[float]) -> float:
    """
    Returns the similarity between two vectors.

    Because OpenAI embeddings are normalized to length 1, the cosine similarity
    is the same as the dot product.
    """
    return np.dot(np.array(x), np.array(y))
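
# Quick sanity check (sketch): for unit vectors the dot product is the cosine.
#   vector_similarity([1.0, 0.0], [1.0, 0.0])  # -> 1.0 (identical direction)
#   vector_similarity([1.0, 0.0], [0.0, 1.0])  # -> 0.0 (orthogonal)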

def order_document_sections_by_query_similarity(
    query: str, contexts: dict[tuple[str, str], list[float]]
) -> list[tuple[float, tuple[str, str]]]:
    """
    Find the embedding for the supplied query and compare it against all of the
    pre-calculated document embeddings to find the most relevant sections.

    Return the list of (similarity, section index) pairs, sorted by relevance
    in descending order.
    """
    query_embedding = get_embedding(query)
    document_similarities = sorted([
        (vector_similarity(query_embedding, doc_embedding), doc_index)
        for doc_index, doc_embedding in contexts.items()
    ], reverse=True)
    return document_similarities

# Leftover smoke test from the cookbook's Olympics example; commented out so
# the app does not spend an embedding call on an unrelated query at startup.
# ox = order_document_sections_by_query_similarity("Who won the men's high jump?", document_embeddings)
# print(f"{ox[0]} entries")

MAX_SECTION_LEN = 500
SEPARATOR = "\n* "
ENCODING = "cl100k_base"  # tokenizer for text-embedding-ada-002
# (text-davinci-003 actually tokenizes with p50k_base, so the section budget
# below is approximate for the completion prompt)

encoding = tiktoken.get_encoding(ENCODING)
separator_len = len(encoding.encode(SEPARATOR))

print(f"Context separator contains {separator_len} tokens")

def construct_prompt(question: str, context_embeddings: dict, df: pd.DataFrame) -> str:
    """
    Fetch the most relevant document sections for the question and assemble
    them, with instructions, into a completion prompt.
    """
    most_relevant_document_sections = order_document_sections_by_query_similarity(question, context_embeddings)

    chosen_sections = []
    chosen_sections_len = 0
    chosen_sections_indexes = []

    for _, section_index in most_relevant_document_sections:
        # Add contexts until we run out of space.
        document_section = df.loc[section_index]

        chosen_sections_len += document_section.tokens + separator_len
        if chosen_sections_len > MAX_SECTION_LEN:
            break

        chosen_sections.append(SEPARATOR + document_section.content.replace("\n", " "))
        chosen_sections_indexes.append(str(section_index))

    # Useful diagnostic information
    print(f"Selected {len(chosen_sections)} document sections:")
    print("\n".join(chosen_sections_indexes))

    header = """Answer the question as truthfully as possible using the provided context, and if the answer is not contained within the text below, say "I don't know."\n\nContext:\n"""

    return header + "".join(chosen_sections) + "\n\n Q: " + question + "\n A:"
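
# Shape of the assembled prompt (illustrative):
#   Answer the question as truthfully as possible using the provided context,
#   and if the answer is not contained within the text below, say "I don't know."
#
#   Context:
#   * <most relevant section, newlines flattened>
#   * <next section, until the 500-token budget is spent>
#
#    Q: <question>
#    A: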

# Standalone prompt-construction test:
# promptX = construct_prompt(
#     "Who won the 2020 Summer Olympics men's high jump?",
#     document_embeddings,
#     df,
# )
# print("===\n", promptX)

def chatbot(user_input, history=None):
    """Build a context-grounded prompt for the question and return the model's answer."""
    promptX = construct_prompt(
        user_input,
        document_embeddings,
        df,
    )
    output = openai_chat(promptX)
    return output
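
# Illustrative call (the question is invented):
#   chatbot("What is a Qpulse audit record?")  # -> grounded answer or "I don't know."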

interface = gr.Blocks(css=".gradio-container {background-color: rgb(0,255,255)}")
# Alternative backgrounds that were tried:
# interface = gr.Blocks(css='div {background-image: url("https://wtamu.edu/~cbaird/sq/images/radiation.png")}')
# interface = gr.Blocks(css='div {background-image: url("https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcT2YALal1tHWTI90H22oBPvxXXVIdTDceesfw&usqp=CAU")}')

with interface:
    gr.Markdown("**<center>WELCOME to PETICs QPULSE CHATBOT!</center>**")
    gr.Markdown("*<center>Brought to you by Dr Rhodri Smith</center>*")
    gr.Markdown("<center>Go ahead, AskQ a question. I am trained on Qpulse content.</center>")
    with gr.Row():
        with gr.Column():
            question_box = gr.Textbox(label="Question")
            inpt_btn = gr.Button(value="Ask Q")
        with gr.Column():
            answer_box = gr.Textbox(label="Answer")
    inpt_btn.click(chatbot, inputs=question_box, outputs=answer_box)

interface.launch(inline=True)