import logging
from contextlib import asynccontextmanager

logging.basicConfig(filename='llm.log', level=logging.DEBUG, format='%(asctime)s:%(levelname)s:%(message)s')

from fastapi import FastAPI, UploadFile, File, Form
from llmwrapper import llmwrapper

_llmwrapper = None
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Load the large model at startup and release it at shutdown.

    FastAPI's ``lifespan=`` parameter expects an async context manager
    factory; a bare async generator function only works through a
    deprecated Starlette compatibility shim, so we wrap explicitly with
    ``@asynccontextmanager``.
    """
    # Load the large model
    logging.info("Starting lifespan")
    global _llmwrapper
    try:
        _llmwrapper = llmwrapper("/opt/ai/llm/models/Qwen2-72B-Instruct-GPTQ-Int8")
    except Exception:
        # Keep serving even if the model fails to load; endpoints will
        # answer with "Model not loaded". Log the full traceback.
        logging.exception("Failed to load model")

    yield
    # Release the large model
    logging.info("Stopping lifespan")

app = FastAPI(lifespan=lifespan)

@app.get("/")
async def root():
    """Liveness endpoint: returns a fixed greeting payload."""
    payload = {"message": "Hello World"}
    return payload

@app.post("/predict")
async def predict(file: UploadFile | None = File(default=None), text: str = Form()):
    """Run the model on *text*, optionally grounded on an uploaded image.

    The annotation declared ``file`` optional but gave it no default, so
    requests without a file were rejected by request validation;
    ``File(default=None)`` makes the upload genuinely optional while
    remaining backward-compatible for callers that do send one.

    Returns a JSON envelope: ``code`` 0 on success with ``data``
    containing the model inputs and response, ``code`` 1 with ``msg``
    on failure.
    """
    try:
        # Persist the uploaded image so the wrapper can read it from disk.
        filepath = None
        if file is not None:
            logging.info("Image uploaded")
            # NOTE(review): single fixed path — concurrent uploads overwrite
            # each other; consider a per-request temp file.
            filepath = "/opt/ai/yzy/images/image.jpg"
            contents = await file.read()
            with open(filepath, "wb") as f:
                f.write(contents)

        # Model prediction
        if _llmwrapper is None:
            return {"code": 1, "msg": "Model not loaded"}

        response, inputs = _llmwrapper.chat(text, filepath)
        return {"code": 0, "msg": "success", "data": {"inputs": inputs, "response": response}}

    except Exception as e:
        # Boundary handler: report the error to the client rather than a 500.
        logging.exception("predict failed")
        return {"code": 1, "msg": str(e)}
        

@app.post("/chat")
async def chat(text: str = Form()):
    """Text-only chat endpoint backed by the shared model wrapper.

    Returns the same JSON envelope as ``/predict``: ``code`` 0 with
    ``data`` on success, ``code`` 1 with ``msg`` on failure.
    """
    try:
        # Was a stray debug print(text); route it through the configured logger.
        logging.debug("chat request: %s", text)
        if _llmwrapper is None:
            return {"code": 1, "msg": "Model not loaded"}

        response, inputs = _llmwrapper.chat2(text)
        return {"code": 0, "msg": "success", "data": {"inputs": inputs, "response": response}}
    except Exception as e:
        logging.exception("chat failed")
        return {"code": 1, "msg": str(e)}

@app.get("/newChat")
async def newChat():
    """Discard the current conversation state and start a fresh chat.

    Fails with code 1 if the model has not been loaded yet.
    """
    # `global` is unnecessary here: the handle is only read, never rebound.
    if _llmwrapper is None:
        return {"code": 1, "msg": "Model not loaded"}

    _llmwrapper.newChat()
    return {"code": 0, "msg": "success"}

def main():
    """Serve the FastAPI app on all interfaces, port 8000."""
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)


if __name__ == "__main__":
    main()
