# chatbot/app.py — Gradio RAG chatbot Space by Zulelee (commit 6b03131)
import gradio as gr
import openai
import os
# Pin the Pinecone environment for this deployment. The API keys
# (OPENAI_API_KEY, PINECONE_API_KEY) must already be present in the
# environment (Space secrets).
os.environ["PINECONE_ENV"] = "asia-southeast1-gcp-free"
# LangChain pieces for embeddings, splitting, vector store and CSV loading.
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Pinecone
from langchain.document_loaders.csv_loader import CSVLoader
# One-time ingestion code, kept for reference: load the products CSV,
# split into ~1000-char chunks, then index into Pinecone.
# loader = CSVLoader(file_path="products_231022 - Products.csv", encoding="utf8")
# documents = loader.load()
# text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
# docs = text_splitter.split_documents(documents)
# Embedding model; must match the one used when the index was built.
embeddings = OpenAIEmbeddings(openai_api_key = os.environ["OPENAI_API_KEY"])
import pinecone
# initialize pinecone client before attaching to the existing index
pinecone.init(
    api_key=os.getenv("PINECONE_API_KEY"),  # find at app.pinecone.io
    environment=os.getenv("PINECONE_ENV"),  # next to api key in console
)
index_name = "chatbot"
# Attach to the pre-built "chatbot" index; no re-indexing happens at startup.
vectordb = Pinecone.from_existing_index(index_name, embeddings)
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
# # Define a function to generate responses using GPT-3
# def chatbot(input_text):
# # from langchain.chat_models import ChatOpenAI
# # llm = ChatOpenAI(model_name='gpt-3.5-turbo', temperature=0)
# # llm.predict("Hello world!")
# # completion = openai.ChatCompletion.create(
# # model="gpt-3.5-turbo",
# # max_tokens=50,
# # api_key=api_key,
# # messages=[
# # {"role": "user", "content": input_text}
# # ]
# # )
# return chain.run({'question': input_text})
# # Create a Gradio interface
# chatbot_interface = gr.Interface(
# fn=chatbot,
# inputs="text",
# outputs="text",
# title="Chatbot",
# )
# # Start the Gradio app
# chatbot_interface.launch(share=True)
# NOTE(review): these three imports duplicate the ones at the top of the
# file; harmless but redundant.
import gradio as gr
import openai
import os
# Configure the openai client directly as well (LangChain reads the env var).
openai.api_key = os.getenv('OPENAI_API_KEY')
class Conversation:
    """Retrieval-augmented chat session over the Pinecone "chatbot" index.

    Keeps a rolling transcript of the last ``num_of_round`` question/answer
    pairs in ``self.messages`` and reuses a single
    ``ConversationalRetrievalChain`` (with its buffer memory) across calls,
    so earlier turns are actually remembered by the chain.
    """

    def __init__(self, num_of_round):
        # Maximum number of Q/A rounds to retain in the local transcript.
        self.num_of_round = num_of_round
        self.messages = []
        # Built lazily on first ask() so constructing a Conversation does
        # not contact OpenAI/Pinecone.
        self._chain = None

    def _get_chain(self):
        """Build the retrieval chain once and cache it on the instance.

        The original code rebuilt llm/retriever/memory/chain on every call,
        which reset the ConversationBufferMemory each turn.
        """
        if self._chain is None:
            retriever = vectordb.as_retriever()
            llm = ChatOpenAI(
                model_name='gpt-3.5-turbo',
                temperature=0,
                openai_api_key=os.environ["OPENAI_API_KEY"],
            )
            memory = ConversationBufferMemory(
                memory_key="chat_history", return_messages=True
            )
            self._chain = ConversationalRetrievalChain.from_llm(
                llm, retriever=retriever, memory=memory
            )
        return self._chain

    def ask(self, question):
        """Answer *question*; returns the answer text, or an error string.

        On failure the error is logged and returned as ``str`` so the
        Gradio chat widget can always render it (previously the exception
        object itself was returned).
        """
        self.messages.append({"role": "user", "content": question})
        try:
            response = self._get_chain().run({'question': question})
        except Exception as e:
            print(e)
            return str(e)
        message = response
        # Append the latest answer to the transcript.
        self.messages.append({"role": "assistant", "content": message})
        if len(self.messages) > self.num_of_round * 2 + 1:
            del self.messages[1:3]  # Remove the oldest remaining round.
        return message
# Single shared session keeping up to 10 rounds of context.
conv = Conversation(10)


def answer(question, history=None):
    """Gradio callback: answer *question* and update the transcript.

    ``history`` is a flat list ``[q1, a1, q2, a2, ...]`` held in gr.State.
    Returns ``(pairs, history)`` where ``pairs`` is the ``[(q, a), ...]``
    form the Chatbot widget expects.
    """
    # Avoid the shared-mutable-default pitfall: the original `history=[]`
    # default would be reused across calls when no state is supplied.
    if history is None:
        history = []
    history.append(question)
    response = conv.ask(question)
    history.append(response)
    # Pair up [q1, a1, q2, a2, ...] into [(q1, a1), (q2, a2), ...].
    responses = [(u, b) for u, b in zip(history[::2], history[1::2])]
    return responses, history
# --- Gradio UI ----------------------------------------------------------
# Chatbot widget plus a textbox; pressing Enter routes the question through
# answer() and writes the result back into both the chat view and the state.
with gr.Blocks(css="#chatbot{height:300px} .overflow-y-auto{height:500px}") as demo:
    chatbot = gr.Chatbot(elem_id="chatbot")
    # Flat [q, a, q, a, ...] transcript persisted between submits.
    state = gr.State([])
    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Enter question and press enter")
    txt.submit(answer, [txt, state], [chatbot, state])
demo.launch()