# The_Art_Story / app.py
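"""Gradio demo: a retrieval-augmented chat assistant.

The app loads a prebuilt FAISS index of document chunks, retrieves the chunks
most similar to the user's question (via OpenAI embeddings), and asks a
Groq-hosted Mixtral model, wrapped in a LangChain ConversationChain with
summary-buffer memory, to answer using those chunks as context. Both an
OpenAI and a Groq API key must therefore be present in the environment.
"""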
import os

import openai

# Silence the HuggingFace tokenizers parallelism warning.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

# Fail fast at startup if either API key is missing (raises KeyError if unset).
os.environ["OPENAI_API_KEY"]
os.environ["GROQ_API_KEY"]

# Conversation agent, populated by create_agent().
agent = None

def create_agent():
    """Create the global ConversationChain agent backed by Groq's Mixtral model."""
    from langchain_groq import ChatGroq
    from langchain.chains.conversation.memory import ConversationSummaryBufferMemory
    from langchain.chains import ConversationChain

    global agent

    llm = ChatGroq(temperature=0, model_name="mixtral-8x7b-32768")
    # Summarize older turns so the running conversation stays under ~1000 tokens.
    memory = ConversationSummaryBufferMemory(llm=llm, max_token_limit=1000)
    agent = ConversationChain(llm=llm, memory=memory, verbose=True)

    return "Successful!"

def formatted_response(docs, question, response, state):
    """Append the answer plus a source list (file name and page, if known) to the chat state."""
    formatted_output = response + "\n\nSources"

    for doc in docs:
        source_info = doc.metadata.get("source", "Unknown source")
        page_info = doc.metadata.get("page", None)
        doc_name = source_info.split("/")[-1].strip()

        if page_info is not None:
            formatted_output += f"\n{doc_name}\tpage no {page_info}"
        else:
            formatted_output += f"\n{doc_name}"

    state.append((question, formatted_output))
    return state, state

def search_docs(prompt, question, state, k):
    """Retrieve the k most similar chunks from the FAISS index and answer with the agent."""
    from langchain_openai import OpenAIEmbeddings
    from langchain.vectorstores import FAISS
    from langchain.callbacks import get_openai_callback

    global agent
    state = state or []

    embeddings = OpenAIEmbeddings()
    docs_db = FAISS.load_local(
        "/home/user/app/docs_db/",
        embeddings,
        allow_dangerous_deserialization=True,
    )
    docs = docs_db.similarity_search(question, int(k))

    # Final prompt: custom instructions, then the question, then the retrieved chunks.
    prompt += "\n\n" + question + "\n\n" + str(docs)

    with get_openai_callback() as cb:
        response = agent.predict(input=prompt)
        # Print usage statistics collected by the OpenAI callback.
        print(cb)

    return formatted_response(docs, question, response, state)

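# The FAISS index loaded above is a prebuilt directory at /home/user/app/docs_db/.
# A minimal sketch of how such an index could be created, assuming the source
# documents are PDFs in a local "docs/" folder (hypothetical helper, never called
# by this app; paths, loader, and chunk sizes are assumptions):
def build_docs_db(source_dir="docs/", out_dir="/home/user/app/docs_db/"):
    from langchain_community.document_loaders import PyPDFDirectoryLoader
    from langchain.text_splitter import RecursiveCharacterTextSplitter
    from langchain_openai import OpenAIEmbeddings
    from langchain.vectorstores import FAISS

    # Load every PDF under source_dir and split the pages into overlapping chunks.
    pages = PyPDFDirectoryLoader(source_dir).load()
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_documents(pages)

    # Embed with the same embedding model used at query time, then persist the index.
    docs_db = FAISS.from_documents(chunks, OpenAIEmbeddings())
    docs_db.save_local(out_dir)
    return "Successful!"
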
import gradio as gr

css = """
.col {
    max-width: 75%;
    margin: 0 auto;
    display: flex;
    flex-direction: column;
    justify-content: center;
    align-items: center;
}
"""

with gr.Blocks(css=css) as demo:
    gr.Markdown("## <center>Your AI Medical Assistant</center>")

    with gr.Tab("Your AI Assistant"):
        with gr.Column(elem_classes="col"):
            with gr.Tab("Query Documents"):
                with gr.Column():
                    create_agent_button = gr.Button("Create Agent")
                    create_agent_output = gr.Textbox(label="Output")

                    docs_prompt_input = gr.Textbox(label="Custom Prompt")
                    k = gr.Textbox(label="Number of Chunks")
                    docs_chatbot = gr.Chatbot(label="Chats")
                    docs_state = gr.State()
                    docs_search_input = gr.Textbox(label="Question")
                    docs_search_button = gr.Button("Search")

                    gr.ClearButton(
                        [docs_prompt_input, docs_search_input, create_agent_output]
                    )
    #########################################################################################################

    create_agent_button.click(create_agent, inputs=None, outputs=create_agent_output)

    docs_search_button.click(
        search_docs,
        inputs=[docs_prompt_input, docs_search_input, docs_state, k],
        outputs=[docs_chatbot, docs_state],
    )

    #########################################################################################################

demo.queue()
demo.launch()
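# A requirements.txt for this Space would need at least the packages imported above
# (package names inferred from the imports; versions are not pinned here and are an
# assumption, not taken from the original repo):
#   gradio
#   openai
#   langchain
#   langchain-community
#   langchain-groq
#   langchain-openai
#   faiss-cpu
#   pypdf  # only if the optional build_docs_db() helper above is used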