from llm.utils import initialize_LLM, format_chat_history, postprocess
from db.utils import initialize_database
import gradio as gr
import spaces


def initializer(list_file_obj, llm_temperature, max_tokens, top_k, thold, progress=gr.Progress()):
    """Build the vector database from the uploaded files and initialize the QA chain."""
    vdb = initialize_database(list_file_obj)
    qa_chain = initialize_LLM(llm_temperature, max_tokens, top_k, vdb, thold)
    return qa_chain, "Success."


# @spaces.GPU  # uncomment to request a GPU allocation when running on ZeroGPU Spaces
def conversation(qa_chain, message, history):
    """Answer a user message with the QA chain and update the chat history."""
    formatted_chat_history = format_chat_history(history)
    # Generate a response using the conversational QA chain
    response = qa_chain.invoke({"question": message, "chat_history": formatted_chat_history})
    response_answer = postprocess(response)
    # Answer cleanup (e.g., stripping any "Helpful Answer:" prefix) is handled by
    # postprocess() above. Source documents remain available via
    # response["source_documents"]; LangChain page metadata is zero-based, so add 1
    # when displaying page numbers.
    # Append the user message and the model response to the chat history
    new_history = history + [(message, response_answer)]
    # Return the chain, clear the input textbox, and pass back the updated history
    return qa_chain, gr.update(value=""), new_history
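

# A minimal sketch of how these handlers might be wired into a Gradio Blocks UI.
# The component names, labels, and slider ranges below are assumptions for
# illustration; they are not part of the original app.
if __name__ == "__main__":
    with gr.Blocks() as demo:
        qa_chain = gr.State()  # holds the initialized chain between calls
        files = gr.Files(label="Upload documents")          # hypothetical component
        temperature = gr.Slider(0.0, 1.0, value=0.5, label="Temperature")
        max_tokens = gr.Slider(128, 4096, value=1024, step=128, label="Max tokens")
        top_k = gr.Slider(1, 10, value=3, step=1, label="Top-k documents")
        thold = gr.Slider(0.0, 1.0, value=0.5, label="Score threshold")
        init_btn = gr.Button("Initialize")
        status = gr.Textbox(label="Status")
        chatbot = gr.Chatbot()
        msg = gr.Textbox(label="Your question")

        # initializer returns (qa_chain, status_message)
        init_btn.click(
            initializer,
            inputs=[files, temperature, max_tokens, top_k, thold],
            outputs=[qa_chain, status],
        )
        # conversation returns (qa_chain, cleared_textbox, new_history)
        msg.submit(
            conversation,
            inputs=[qa_chain, msg, chatbot],
            outputs=[qa_chain, msg, chatbot],
        )
    demo.launch()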