# hospital-code / app.py
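"""Gradio chatbot app for the hospital-code Space.

Answers questions against a pre-built Pinecone index ("ibc-12") using
LangChain's ConversationalRetrievalChain and an OpenAI chat model.
"""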
import gradio as gr
import pinecone
import openai
import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Pinecone
# Secrets are read from environment variables (e.g. Hugging Face Space secrets)
BOOK_TOKEN = os.getenv("book")        # OpenAI API key
PINECONE_API_KEY = os.getenv("pine")  # Pinecone API key
PINECONE_API_ENV = "gcp-starter"

os.environ["OPENAI_API_KEY"] = BOOK_TOKEN

embed_model = "text-embedding-ada-002"
# Connect to the existing Pinecone index that holds the document embeddings
pinecone.init(
    api_key=PINECONE_API_KEY,
    environment=PINECONE_API_ENV
)
openai.api_key = BOOK_TOKEN

index_n = "ibc-12"
index = pinecone.Index(index_n)
print(index.describe_index_stats())  # sanity check that the index is reachable and populated
# Chat model used by the retrieval chain (temperature 0 for consistent answers)
llm = ChatOpenAI(temperature=0, model_name="gpt-4")

embeddings = OpenAIEmbeddings(model=embed_model)
# Wrap the existing Pinecone index as a LangChain vector store for retrieval
db = Pinecone.from_existing_index(index_name=index_n, embedding=embeddings)
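# NOTE: the "ibc-12" index is assumed to have been populated elsewhere with the
# same embedding model; a minimal indexing sketch would look something like:
#   Pinecone.from_texts(texts, embeddings, index_name=index_n)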
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Talk to the Bot")
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    # Initialize the LangChain Conversational Retrieval Chain once, reusing the
    # gpt-4 model defined above. The conversation history is passed in explicitly
    # from the Gradio Chatbot component, so no separate memory object is needed
    # (a fresh ConversationBufferMemory per call would discard earlier turns).
    qa = ConversationalRetrievalChain.from_llm(llm, retriever=db.as_retriever())

    def user(user_message, chat_history):
        # Gradio may deliver history turns as lists; the chain expects tuples
        history = [tuple(turn) for turn in chat_history]
        # Get a response from the QA chain
        response = qa({"question": user_message, "chat_history": history})
        # Append the user message and response to the chat history
        chat_history.append((user_message, response["answer"]))
        # Clear the textbox and show the updated history
        return gr.update(value=""), chat_history

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False)
    clear.click(lambda: None, None, chatbot, queue=False)
if __name__ == "__main__":
    demo.launch(debug=True)
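# To run locally (a sketch; assumes the "book" and "pine" secrets are set):
#   export book=<openai-api-key> pine=<pinecone-api-key>
#   python app.py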