"""Main entrypoint for the app."""
import json
import os
from timeit import default_timer as timer
from typing import List, Optional

from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores.chroma import Chroma
from langchain.vectorstores.faiss import FAISS
from lcserve import serving
from pydantic import BaseModel

from app_modules.presets import *
from app_modules.qa_chain import QAChain
from app_modules.utils import *


init_settings()
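
# An empty CURL_CA_BUNDLE effectively disables SSL certificate verification
# for outgoing HTTPS requests (e.g. model downloads behind a proxy).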
os.environ["CURL_CA_BUNDLE"] = "" |
|
|
|
hf_embeddings_device_type, hf_pipeline_device_type = get_device_types()
print(f"hf_embeddings_device_type: {hf_embeddings_device_type}")
print(f"hf_pipeline_device_type: {hf_pipeline_device_type}")

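# Runtime configuration read from environment variables.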
hf_embeddings_model_name = (
    os.environ.get("HF_EMBEDDINGS_MODEL_NAME") or "hkunlp/instructor-xl"
)
n_threds = int(os.environ.get("NUMBER_OF_CPU_CORES") or "4")
index_path = os.environ.get("FAISS_INDEX_PATH") or os.environ.get("CHROMADB_INDEX_PATH")
using_faiss = os.environ.get("FAISS_INDEX_PATH") is not None
llm_model_type = os.environ.get("LLM_MODEL_TYPE")
chat_history_enabled = os.environ.get("CHAT_HISTORY_ENABLED") == "true"
show_param_settings = os.environ.get("SHOW_PARAM_SETTINGS") == "true"
share_gradio_app = os.environ.get("SHARE_GRADIO_APP") == "true"

streaming_enabled = True

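# Load the HuggingFace Instruct embedding model onto the configured device.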
start = timer()
embeddings = HuggingFaceInstructEmbeddings(
    model_name=hf_embeddings_model_name,
    model_kwargs={"device": hf_embeddings_device_type},
)
end = timer()
print(f"Completed in {end - start:.3f}s")

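# Load the persisted index: FAISS if FAISS_INDEX_PATH is set, otherwise
# Chroma from CHROMADB_INDEX_PATH.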
start = timer()
print(f"Load index from {index_path} with {'FAISS' if using_faiss else 'Chroma'}")

if index_path is None or not os.path.isdir(index_path):
    raise ValueError(f"{index_path} does not exist!")
elif using_faiss:
    vectorstore = FAISS.load_local(index_path, embeddings)
else:
    vectorstore = Chroma(embedding_function=embeddings, persist_directory=index_path)

end = timer()
print(f"Completed in {end - start:.3f}s")

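# Build the question-answering chain on top of the vector store and the LLM
# selected via LLM_MODEL_TYPE.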
start = timer()
qa_chain = QAChain(vectorstore, llm_model_type)
qa_chain.init(n_threds=n_threds, hf_pipeline_device_type=hf_pipeline_device_type)
end = timer()
print(f"Completed in {end - start:.3f}s")


class ChatResponse(BaseModel):
    """Chat response schema."""

    token: Optional[str] = None
    error: Optional[str] = None
    sourceDocs: Optional[List] = None


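# WebSocket chat endpoint exposed through lcserve: it takes a question plus an
# optional chat history, runs the QA chain, and returns a JSON-encoded
# ChatResponse. When streaming is enabled, tokens are pushed through the
# "streaming_handler" provided in kwargs.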
@serving(websocket=True)
def chat(question: str, history: Optional[List], **kwargs) -> str:
    streaming_handler = kwargs.get("streaming_handler") if streaming_enabled else None

    chat_history = []
    if chat_history_enabled and history:
        for element in history:
            item = (element[0] or "", element[1] or "")
            chat_history.append(item)

    start = timer()
    result = qa_chain.call(
        {"question": question, "chat_history": chat_history}, streaming_handler
    )
    end = timer()
    print(f"Completed in {end - start:.3f}s")

    resp = ChatResponse(sourceDocs=result["source_documents"])

    if not streaming_enabled:
        resp.token = remove_extra_spaces(result["answer"])
        print(resp.token)

    return json.dumps(resp.dict())


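# Smoke test: run the chain once directly when this module is executed as a
# script.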
if __name__ == "__main__":
    print_llm_response(json.loads(chat("What is PCI DSS?", [])))