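# Gradio chat app: LangChain's ConversationalRetrievalChain over an existing
# Pinecone index ("ibc-12"), answered by an OpenAI chat model.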
import gradio as gr
import pinecone
import openai
import os
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores import Pinecone
from langchain.prompts.prompt import PromptTemplate
# Secrets injected by the hosting environment (e.g. Hugging Face Space secrets).
BOOK_TOKEN = os.getenv("book")    # OpenAI API key
pine = os.getenv("pine")          # Pinecone API key
HF_TOKEN = os.getenv("HF_TOKEN")
os.environ["OPENAI_API_KEY"] = BOOK_TOKEN
PINECONE_API_ENV = "gcp-starter"
#embedding = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
embed_model = "text-embedding-ada-002"
pinecone.init(
    api_key=pine,
    environment=PINECONE_API_ENV
)
openai.api_key = BOOK_TOKEN
index_n = "ibc-12"
index = pinecone.Index(index_n)
index.describe_index_stats()
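# describe_index_stats() reports index metadata (total vector count, dimension);
# handy for confirming the index is populated. Its return value is unused here.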
limit = 3750  # token budget for context; not referenced elsewhere in this file
llm = ChatOpenAI(temperature=0, model_name="gpt-4")
embeddings = OpenAIEmbeddings(
    model="text-embedding-ada-002"
)
# Get a LangChain vector store backed by the existing Pinecone index.
db = Pinecone.from_existing_index(index_name=index_n, embedding=embeddings)
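# Optional sanity check (the query string below is only an illustration): a raw
# similarity search returns the top-k matching Document chunks from the index.
#   docs = db.similarity_search("What does section 101 cover?", k=3)
#   print(docs[0].page_content)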
theme = gr.themes.Soft(
    primary_hue="emerald",
).set(
    block_background_fill='black'
)
with gr.Blocks(theme=theme) as demo:
    chatbot = gr.Chatbot(label="Talk to the Bot", show_copy_button=True)
    msg = gr.Textbox()
    clear = gr.Button("Clear")
    chat_history = []  # unused: the Chatbot component itself supplies the history
    def vote(data: gr.LikeData):
        if data.liked:
            print("You upvoted this response: " + data.value)
        else:
            print("You downvoted this response: " + data.value)
    def user(user_message, chat_history):
        memory = ConversationBufferMemory(
            memory_key='chat_history',
            return_messages=False
        )
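        # Note: this memory object is rebuilt on every submit, so the state that
        # actually persists between turns is the chat_history list coming from
        # the Chatbot component.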
        # PUT IT IN A PROMPT TEMPLATE
        #template = """The following is a chat between a human and an AI assistant. The AI provides the answer along with the section it referred to for the answer.
        #Current Conversation:
        #{history}
        #Friend: {input}
        #AI:
        #"""
        #PROMPT = PromptTemplate(input_variables=["history", "input"], template=template)
        # Initialize the LangChain Conversational Retrieval Chain.
        qa = ConversationalRetrievalChain.from_llm(llm, retriever=db.as_retriever(), memory=memory)
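        # The chain condenses chat_history plus the new question into a
        # standalone query, retrieves matching chunks from Pinecone, and
        # answers from those chunks.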
        # Get a response from the QA chain.
        response = qa({'question': user_message, "chat_history": chat_history})
        # Append the user message and response to the chat history.
        chat_history.append((user_message, response["answer"]))
        return gr.update(value=""), chat_history
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False)
    chatbot.like(vote, None, None)
    clear.click(lambda: None, None, chatbot, queue=False)
if __name__ == "__main__":
    demo.launch(debug=True)
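# To run outside Spaces, export the secrets this app reads before launching:
#   book = your OpenAI API key, pine = your Pinecone API key.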