# katara/main.py
from typing import Union
from fastapi import FastAPI, HTTPException, UploadFile, WebSocket
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel
import pickle
import uvicorn
import logging
import os
import shutil
import subprocess
import torch
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.prompts import PromptTemplate
# from langchain.embeddings import HuggingFaceEmbeddings
from run_localGPT import load_model
from prompt_template_utils import get_prompt_template
# from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.vectorstores import Chroma
from werkzeug.utils import secure_filename
from constants import CHROMA_SETTINGS, EMBEDDING_MODEL_NAME, PERSIST_DIRECTORY, MODEL_ID, MODEL_BASENAME
# Automatic device selection, kept here commented out; the device is pinned
# to CUDA below instead.
# if torch.backends.mps.is_available():
#     DEVICE_TYPE = "mps"
# elif torch.cuda.is_available():
#     DEVICE_TYPE = "cuda"
# else:
#     DEVICE_TYPE = "cpu"
DEVICE_TYPE = "cuda"
SHOW_SOURCES = True
EMBEDDINGS = HuggingFaceInstructEmbeddings(model_name=EMBEDDING_MODEL_NAME, model_kwargs={"device": DEVICE_TYPE})
# Load the persisted vectorstore.
DB = Chroma(
    persist_directory=PERSIST_DIRECTORY,
    embedding_function=EMBEDDINGS,
    client_settings=CHROMA_SETTINGS,
)
RETRIEVER = DB.as_retriever()

LLM = load_model(device_type=DEVICE_TYPE, model_id=MODEL_ID, model_basename=MODEL_BASENAME)
# NOTE: this module-level prompt/memory pair is unused; the QA chain below is
# built with the custom template instead.
prompt, memory = get_prompt_template(promptTemplate_type="llama", history=False)
template = """you are a helpful, respectful and honest assistant.
Your name is Katara llma. You should only use the source documents provided to answer the questions.
You should only respond only topics that contains in documents use to training.
Use the following pieces of context to answer the question at the end.
Always answer in the most helpful and safe way possible.
If you don't know the answer to a question, just say that you don't know, don't try to make up an answer, don't share false information.
Use 15 sentences maximum. Keep the answer as concise as possible.
Always say "thanks for asking!" at the end of the answer.
{context}
Question: {question}
Helpful Answer:"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(template)
QA = RetrievalQA.from_chain_type(
    llm=LLM,
    chain_type="stuff",
    retriever=RETRIEVER,
    return_source_documents=SHOW_SOURCES,
    chain_type_kwargs={
        "prompt": QA_CHAIN_PROMPT,
    },
)


class Predict(BaseModel):
    prompt: str


app = FastAPI(title="homepage-app")
api_app = FastAPI(title="api app")
app.mount("/api", api_app, name="api")
app.mount("/", StaticFiles(directory="static", html=True), name="static")


@api_app.post("/predict")
async def predict(data: Predict):
    user_prompt = data.prompt
    if user_prompt:
        # Get the answer (and its source documents) from the retrieval chain.
        res = QA(user_prompt)
        answer, docs = res["result"], res["source_documents"]

        prompt_response_dict = {
            "Prompt": user_prompt,
            "Answer": answer,
        }
        prompt_response_dict["Sources"] = []
        for document in docs:
            prompt_response_dict["Sources"].append(
                (os.path.basename(str(document.metadata["source"])), str(document.page_content))
            )
        return {"response": prompt_response_dict}
    else:
        raise HTTPException(status_code=400, detail="Prompt Incorrect")
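

# Example request against this endpoint (assumes uvicorn's default port 8000;
# api_app is mounted under /api, so the full path is /api/predict):
#   curl -X POST http://localhost:8000/api/predict \
#        -H "Content-Type: application/json" \
#        -d '{"prompt": "What do my documents say about X?"}'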


@api_app.get("/run_ingest")
def run_ingest_route():
    # DB, RETRIEVER and QA are rebuilt below; without the global declaration the
    # assignments would only create function-local names and the running chain
    # would keep serving the old index.
    global DB, RETRIEVER, QA
    try:
        if os.path.exists(PERSIST_DIRECTORY):
            try:
                shutil.rmtree(PERSIST_DIRECTORY)
            except OSError as e:
                raise HTTPException(status_code=500, detail=f"Error: {e.filename} - {e.strerror}.")
        else:
            raise HTTPException(status_code=500, detail="The directory does not exist")

        run_langest_commands = ["python", "ingest.py"]
        if DEVICE_TYPE == "cpu":
            run_langest_commands.append("--device_type")
            run_langest_commands.append(DEVICE_TYPE)

        result = subprocess.run(run_langest_commands, capture_output=True)
        if result.returncode != 0:
            raise HTTPException(
                status_code=400,
                detail="Script execution failed: {}".format(result.stderr.decode("utf-8")),
            )

        # Reload the vectorstore and rebuild the QA chain against the fresh index.
        DB = Chroma(
            persist_directory=PERSIST_DIRECTORY,
            embedding_function=EMBEDDINGS,
            client_settings=CHROMA_SETTINGS,
        )
        RETRIEVER = DB.as_retriever()
        prompt, memory = get_prompt_template(promptTemplate_type="llama", history=False)
        QA = RetrievalQA.from_chain_type(
            llm=LLM,
            chain_type="stuff",
            retriever=RETRIEVER,
            return_source_documents=SHOW_SOURCES,
            chain_type_kwargs={
                "prompt": prompt,
            },
        )
        return {"response": "Script executed successfully: {}".format(result.stdout.decode("utf-8"))}
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error occurred: {str(e)}")
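

# Example (assumes port 8000): curl http://localhost:8000/api/run_ingest
# Note: this deletes PERSIST_DIRECTORY and re-runs ingest.py, so it may take a while.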


@api_app.post("/save_document/")
async def create_upload_file(file: Union[UploadFile, None] = None):
    try:
        if not file:
            raise HTTPException(status_code=400, detail="No upload file sent")
        if file.filename == "":
            raise HTTPException(status_code=400, detail="No selected file")

        filename = secure_filename(file.filename)
        folder_path = "SOURCE_DOCUMENTS"
        if not os.path.exists(folder_path):
            os.makedirs(folder_path)
        file_path = os.path.join(folder_path, filename)
        # FastAPI's UploadFile has no Flask-style .save() method; copy the
        # underlying file object to disk instead.
        with open(file_path, "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)
        return {"response": "File saved successfully"}
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=400, detail=str(e))
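

# Example upload (assumes port 8000; sample.pdf is a placeholder filename):
#   curl -X POST http://localhost:8000/api/save_document/ -F "file=@sample.pdf"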


@api_app.websocket("/ws")
async def websocket_endpoint(websocket: WebSocket):
    await websocket.accept()
    while True:
        data = await websocket.receive_text()
        await websocket.send_text(f"Message text was: {data}")
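

# The echo websocket above can be exercised with any websocket client, e.g.:
#   wscat -c ws://localhost:8000/api/ws

# uvicorn is imported at the top of this file but never invoked; a minimal
# entry point, with host and port as assumptions rather than values taken from
# the original, might look like this:
if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)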