import gradio as gr
import pinecone
import openai
import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores import Pinecone
from langchain.prompts import PromptTemplate
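
# Secrets are supplied through environment variables: "book" holds the
# OpenAI API key and "pine" the Pinecone API key. (HF_TOKEN is read but
# not used in this file.)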
BOOK_TOKEN = os.getenv("book")
pine = os.getenv("pine")
HF_TOKEN = os.getenv("HF_TOKEN")
os.environ["OPENAI_API_KEY"] = BOOK_TOKEN
PINECONE_API_ENV = "us-east-1-aws"
embed_model = "text-embedding-ada-002"
pinecone.init(
    api_key=pine,
    environment=PINECONE_API_ENV,
)
openai.api_key = BOOK_TOKEN
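
# Connect to the existing Pinecone index; describe_index_stats() acts as a
# quick sanity check that the index is reachable (its return value is unused).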
index_n = "langchain2"
index = pinecone.Index(index_n)
index.describe_index_stats()
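# Context-size budget in tokens; not referenced elsewhere in this file
# (presumably left over from a prompt-building step).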
limit = 3750
llm = ChatOpenAI(temperature=0, model_name="gpt-4")
embeddings = OpenAIEmbeddings(model=embed_model)
# Get the vector store backed by the existing Pinecone index
db = Pinecone.from_existing_index(index_name=index_n, embedding=embeddings)
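# (For a quick check, db.similarity_search(query, k=3) would return the top
# matching chunks from the index.)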
# Build the Gradio chat UI
with gr.Blocks() as demo:
    chatbot = gr.Chatbot(label="Talk to the Book")
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def user(user_message, chat_history):
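        # chat_history arrives from the Chatbot component as a list of
        # (user, ai) message pairs, which is also the history format the
        # retrieval chain accepts.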
        formatted_history = "\n".join(
            "Friend: {}\nAI: {}".format(pair[0], pair[1]) for pair in chat_history
        )
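        # The persona prompt below is assembled but not yet passed to the
        # chain; one possible way to wire it in is sketched after the chain
        # setup further down.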
        prompt = f"""
The following is a chat between a human and an expert AI assistant. The AI assistant provides the answer along with the section it referred to for the answer. The AI should only refer to the context and should not provide answers if it doesn't know.
Current Conversation:
{formatted_history}
Friend: {user_message}
AI:
"""
        # Initialize the LangChain ConversationalRetrievalChain. The chat
        # history is passed in explicitly on every call, so no memory object
        # is attached (a fresh per-call ConversationBufferMemory would start
        # empty and shadow the history supplied in the inputs).
        qa = ConversationalRetrievalChain.from_llm(llm, retriever=db.as_retriever())
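
        # A sketch (commented out) of one way to use a custom answering
        # prompt: the default "stuff" combine-docs chain expects {context}
        # and {question} variables, so the persona text above would need to
        # be recast in those terms. The names qa_prompt/qa_template are
        # illustrative, not part of the original app.
        # qa_template = (
        #     "You are an expert AI assistant. Answer only from the context "
        #     "below, cite the section you used, and say you don't know if "
        #     "the answer is not in the context.\n\n"
        #     "Context:\n{context}\n\nQuestion: {question}\nAI:"
        # )
        # qa = ConversationalRetrievalChain.from_llm(
        #     llm,
        #     retriever=db.as_retriever(),
        #     combine_docs_chain_kwargs={
        #         "prompt": PromptTemplate(
        #             template=qa_template,
        #             input_variables=["context", "question"],
        #         )
        #     },
        # )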

        # Get the response from the QA chain
        response = qa({"question": user_message, "chat_history": chat_history})
        answer = response["answer"]

        # Append the user message and the response to the chat history
        chat_history.append((user_message, answer))
        return gr.update(value=""), chat_history

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False)
    clear.click(lambda: None, None, chatbot, queue=False)

if __name__ == "__main__":
demo.launch(debug=False)