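# Gradio chatbot that answers questions against the "hospital-code" Pinecone
# index via LangChain's ConversationalRetrievalChain.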
import gradio as gr
import pinecone
import openai
import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Pinecone
from langchain.prompts.prompt import PromptTemplate
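# Secrets are read from environment variables (e.g. Hugging Face Space secrets).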
BOOK_TOKEN = os.getenv("book")
pine = os.getenv("pine")
HF_TOKEN = os.getenv("HF_TOKEN")
os.environ["OPENAI_API_KEY"] = BOOK_TOKEN
PINECONE_API_ENV = "gcp-starter"
embed_model = "text-embedding-ada-002"  # fixed: "text-embedding-ada-003" does not exist; this variable is currently unused
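# pinecone.init()/pinecone.Index() below follow the pinecone-client v2 API;
# newer (v3+) releases replace this with the Pinecone class.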
pinecone.init(
    api_key=pine,
    environment=PINECONE_API_ENV
)
openai.api_key = BOOK_TOKEN
index_n = "hospital-code"
index = pinecone.Index(index_n)
print(index.describe_index_stats())  # sanity check that the index is reachable and populated
limit = 3750  # context character budget (currently unused)
llm = ChatOpenAI(temperature=0, model_name="gpt-4")
embeddings = OpenAIEmbeddings(
    model="text-embedding-3-large"
)
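# NOTE: the embedding model here must match the one used when the index was
# built, or the query vectors' dimensions will not match the index.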
# get the db index
db = Pinecone.from_existing_index(index_name=index_n, embedding=embeddings)
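# db wraps the existing Pinecone index so LangChain can run similarity search over it.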
theme = gr.themes.Soft(
    primary_hue="emerald",
).set(
    block_background_fill='black'
)
with gr.Blocks(theme=theme) as demo:
    chatbot = gr.Chatbot(label="Talk to the Bot", show_copy_button=True, show_label=True)
    msg = gr.Textbox()
    clear = gr.Button("Clear")
    chat_history = []
    def vote(data: gr.LikeData):
        if data.liked:
            print("You upvoted this response: " + data.value)
        else:
            print("You downvoted this response: " + data.value)
    def user(user_message, chat_history):
        # PUT IT IN A PROMPT TEMPLATE
        # template = """The following is a chat between a human and an AI assistant. The AI provides the answer along with the section it referred to for the answer.
        # Current Conversation:
        # {history}
        # Friend: {input}
        # AI:
        # """
        # PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)
        # Initialize the LangChain Conversational Retrieval Chain, reusing the
        # gpt-4 llm defined above. History is passed in explicitly from the
        # Chatbot component, so a fresh ConversationBufferMemory per call is
        # not needed (it would start empty and hide the real history).
        qa = ConversationalRetrievalChain.from_llm(llm, retriever=db.as_retriever())
        # Gradio delivers history as [user, bot] pairs; the chain expects tuples
        history_tuples = [tuple(turn) for turn in chat_history]
        # get the response from the QA chain
        response = qa({"question": user_message, "chat_history": history_tuples})
        # append the user message and response to the chat history
        chat_history.append((user_message, response["answer"]))
        return gr.update(value=""), chat_history
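    # Wire up the UI: submitting the textbox calls user(), likes/dislikes are
    # logged by vote(), and the Clear button resets the chatbot.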
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False)
    chatbot.like(vote, None, None)
    clear.click(lambda: None, None, chatbot, queue=False)
if __name__ == "__main__":
    demo.launch(share=True)