personata / app.py
mdp-1395's picture
first_commit_1
617f8fb
raw
history blame
2.61 kB
import gradio as gr
from openai import OpenAI
from pinecone import Pinecone
import os
# System prompt sent as the first chat message on every request. It frames
# the assistant as "Personata", answering questions about Divya Prakash
# Manivannan's professional profile using RAG-retrieved resume content.
# NOTE: this is a runtime string consumed by the LLM — edit with care.
DEFAULT_SYSTEM_PROMPT = '''
Your name is Personata. You are a helpful, respectful AI Chatbot.
You will be representing Divya Prakash Manivannan( First Name: Divya Prakash Last Name: Manivannan) and who goes by pronouns (He/Him).
You will be prompted by the user for his information about his professional profile and you will have answer it on his behalf.
You also have access to RAG vectore database access which has his data, with which you will answer the question asked.
Be careful when giving response, sometime irrelevent Rag content will be there so give response effectivly to user based on the prompt.
You can speak fluently in English.
Always answer as helpfully and logically as possible, while being safe.
Your answers should not include any harmful, political, religious, unethical, racist, sexist, toxic, dangerous, or illegal content.
Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct.
If you don't have the RAG response, answer that " I do not have the information".
Try to give concise answers, wherever required
'''
## API Keys
# Secrets are read from the environment (Hugging Face Space secrets);
# never hard-code keys in source.
PINE_CONE_API_KEY = os.getenv('PINE_CONE_API_KEY')
OPENAI_API_KEY = os.getenv('OPENAI_API_KEY')
# Pinecone client for the vector database holding the resume embeddings.
pc = Pinecone(
    api_key= PINE_CONE_API_KEY
)
# OpenAI client used for both embeddings and chat completions.
client = OpenAI(api_key= OPENAI_API_KEY)
# Index containing the pre-embedded resume chunks used for retrieval.
index = pc.Index("rag-resume")
def vector_search(query, score_threshold=0.80):
    """Return concatenated resume text relevant to *query*.

    Embeds the query with OpenAI ``text-embedding-ada-002`` and searches
    the Pinecone ``rag-resume`` index, keeping only matches whose
    similarity score meets ``score_threshold``.

    Args:
        query: The user's question to embed and search for.
        score_threshold: Minimum match score to include a chunk
            (default 0.80, the original hard-coded cutoff).

    Returns:
        str: Concatenated ``metadata['text']`` of all qualifying matches;
        an empty string when nothing scores above the threshold.
    """
    embedding = client.embeddings.create(
        input=query,
        model="text-embedding-ada-002",
    ).data[0].embedding
    res = index.query(vector=embedding, top_k=5, include_metadata=True)
    # ''.join over a generator avoids quadratic repeated string
    # concatenation; the leftover debug print(res) has been removed.
    return "".join(
        match['metadata']['text']
        for match in res['matches']
        if match['score'] >= score_threshold
    )
def respond(
    message,
    history: list[tuple[str, str]]
):
    """Gradio ChatInterface callback: answer *message* with RAG context.

    Builds the OpenAI chat payload from the system prompt, the prior
    conversation turns, and the current user message augmented with text
    retrieved from the Pinecone index, then yields the model's reply.

    Args:
        message: The current user message.
        history: Prior (user, assistant) turn pairs supplied by Gradio.

    Yields:
        str: The assistant's reply text (single, non-streamed chunk).
    """
    messages = [{"role": "system", "content": DEFAULT_SYSTEM_PROMPT}]
    # Retrieve context relevant to the *current* question only.
    rag = vector_search(message)
    for user_turn, assistant_turn in history:
        if user_turn:
            messages.append({"role": "user", "content": user_turn})
        if assistant_turn:
            messages.append({"role": "assistant", "content": assistant_turn})
    # Bug fix: attach the retrieved context to the current message. The
    # original appended `rag` to every *historical* user turn and sent the
    # current question with no context at all, polluting the history and
    # defeating the retrieval step.
    messages.append({"role": "user", "content": message + rag})
    response = client.chat.completions.create(
        model="gpt-3.5-turbo-1106",
        messages=messages,
    )
    yield response.choices[0].message.content
# Wire the chat callback into a Gradio chat UI.
demo = gr.ChatInterface(
    respond
)
# Enable request queueing so concurrent users are served in order.
demo.queue()
# Launch the app only when run as a script (HF Spaces also imports app.py).
if __name__ == "__main__":
    demo.launch()