from fastapi import FastAPI, WebSocket, WebSocketDisconnect, File, UploadFile, HTTPException
# from keras.models import load_model
from tflite_runtime.interpreter import Interpreter
import cv2
import numpy as np
from fastapi.middleware.cors import CORSMiddleware
import os
import zipfile
from PIL import Image, ImageOps
from io import BytesIO
from fastapi.responses import JSONResponse

app = FastAPI()

# Allow cross-origin access from any origin (fully open CORS policy).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # "*" permits requests from every origin
    allow_methods=["*"],
    allow_headers=["*"],
)
# Global state shared across request handlers.
camera = None  # cv2.VideoCapture handle; None while the camera is closed
model = None   # legacy Keras model slot (unused since the TFLite switch)
labels = None  # list of label lines read from labels.txt
# Disable scientific notation for clarity
np.set_printoptions(suppress=True)

# Globals holding the loaded TFLite model state (set by /load_model/).
interpreter = None
input_details = None
output_details = None


def init():
    """Reset global state: release the camera if it is open, clear model/labels."""
    global camera, model, labels
    if camera and camera.isOpened():
        camera.release()
    # Drop all handles so the next open/load starts from a clean slate.
    camera = None
    model = None
    labels = None


@app.on_event("shutdown")
async def shutdown():
    """Release the webcam when the application shuts down."""
    global camera
    if camera is not None:
        if camera.isOpened():
            camera.release()  # free the device so other processes can use it


@app.get("/", summary='测试连接是否可用')
async def ping():
    """Health-check endpoint: answers 'pong' when the service is reachable."""
    return dict(message='pong')


@app.post("/upload_model/", summary='上传模型的zip压缩包', description='要求有keras_model.h5和labels.txt文件')
async def upload_model(file: UploadFile = File(...)):
    """Receive a model zip and extract the model + labels into ./model.

    The archive must contain labels.txt and a model file. /load_model/
    actually loads keras_model.tflite, so that file is accepted here
    (the legacy keras_model.h5 is still accepted for backward compatibility).

    Returns 400 for a non-zip upload or a zip missing the required files,
    500 for extraction errors, 200 on success.
    """
    if not file.filename.endswith(".zip"):
        raise HTTPException(status_code=400, detail="必须上传.zip文件")

    # Directory the archive members are extracted into.
    upload_dir = './model'
    # Accepted model file names; keras_model.tflite is what /load_model/ opens.
    model_candidates = ('keras_model.tflite', 'keras_model.h5')
    try:
        os.makedirs(upload_dir, exist_ok=True)

        # Read the whole upload and open it as an in-memory zip.
        contents = await file.read()
        with zipfile.ZipFile(BytesIO(contents)) as zip_ref:
            names = set(zip_ref.namelist())
            present_models = [m for m in model_candidates if m in names]
            if 'labels.txt' not in names or not present_models:
                message = '压缩包中缺少 keras_model.tflite/keras_model.h5 或 labels.txt 文件'
                return JSONResponse(status_code=400, content={'message': message})
            # Extract only the known, fixed-name members (avoids extracting
            # arbitrary archive content into the filesystem).
            zip_ref.extract('labels.txt', path=upload_dir)
            for member in present_models:
                zip_ref.extract(member, path=upload_dir)

    except Exception as e:
        # Corrupt zip, disk errors, etc. — surface the reason as a 500.
        return JSONResponse(content={"error": str(e)}, status_code=500)

    return {"message": "模型和标签文件上传并加载成功"}


@app.get("/open_camera/", summary='打开摄像头')
async def open_camera():
    """Open the default webcam; a no-op if it is already open."""
    global camera
    already_open = camera is not None and camera.isOpened()
    if not already_open:
        camera = cv2.VideoCapture(0)  # device 0 = system default camera
        if not camera.isOpened():
            return JSONResponse(status_code=400, content={'message': '无法打开摄像头'})
    return {"message": "摄像头已成功打开"}


@app.get("/close_camera/", summary='关闭摄像头')
async def close_camera():
    """Release the webcam and clear the model/label globals."""
    global camera, model, labels
    is_open = camera is not None and camera.isOpened()
    if is_open:
        camera.release()
        # Reset all shared state so a later /predict fails fast.
        camera = model = labels = None
    return {"message": "摄像头已成功关闭"}


def process_frame(image):
    """Classify one BGR frame with the loaded TFLite model.

    Args:
        image: BGR frame from cv2.VideoCapture.read() (H x W x 3 uint8 array
               — assumed; TODO confirm against the capture settings).

    Returns:
        dict with 'class' (label name) and 'confidence' (integer percent,
        as a string).

    Assumes /load_model/ has populated `interpreter`, `input_details`,
    `output_details` and `labels` (the caller, /predict, checks this).
    """
    global interpreter, labels

    # OpenCV delivers BGR; convert to an RGB PIL image for preprocessing.
    pil_image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)).convert("RGB")
    # Center-crop/resize to the 224x224 input the model expects.
    pil_image = ImageOps.fit(pil_image, (224, 224), Image.LANCZOS)
    # Normalize to [-1, 1] float32 and add the batch dimension -> (1, 224, 224, 3).
    image_array = np.asarray(pil_image).astype(np.float32)
    input_data = np.expand_dims((image_array / 127.5) - 1, axis=0)

    # Run inference.
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]['index'])

    # Class with the highest score.
    index = int(np.argmax(output_data))

    # Label lines look like "0 apple": split once on the first space to get
    # the name; fall back to the whole line if there is no index prefix
    # (the original raised IndexError on such lines).
    parts = labels[index].split(' ', 1)
    class_name = parts[1] if len(parts) > 1 else parts[0]

    confidence_score = float(output_data[0][index])
    # Integer percentage as a string. The original did
    # str(np.round(score * 100))[:-2], relying on the "...0" suffix of the
    # numpy scalar's str form; compute it explicitly instead.
    confidence_score_rate = str(int(round(confidence_score * 100)))

    print(class_name, confidence_score_rate)
    return {"class": class_name, "confidence": confidence_score_rate}


@app.get("/load_model/", summary='加载模型')
async def load_model_files():
    """Load the TFLite model and labels file into module globals.

    Populates `interpreter`, `input_details`, `output_details` and `labels`.
    Returns 400 when either required file is missing, 200 on success.
    """
    global labels, interpreter, input_details, output_details

    model_path = "./model/keras_model.tflite"
    labels_path = "./model/labels.txt"

    # Check the files that are actually opened below. (The original tested
    # for keras_model.h5 while the interpreter loads keras_model.tflite,
    # so the check could pass and loading still crash — or reject a
    # perfectly loadable tflite-only setup.)
    if not os.path.exists(model_path):
        message = '模型文件 keras_model.tflite 不存在'
        return JSONResponse(status_code=400, content={'message': message})
    if not os.path.exists(labels_path):
        message = '标签文件 labels.txt 不存在'
        return JSONResponse(status_code=400, content={'message': message})

    # Load the TFLite model and cache its I/O tensor metadata.
    interpreter = Interpreter(model_path=model_path)
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    print("模型加载完成")

    # One label per line, e.g. "0 apple".
    with open(labels_path, "r") as file:
        labels = [line.strip() for line in file]

    return {"message": "模型和标签文件加载成功"}


@app.get("/predict", summary='进行分类检测')
async def predict():
    """Grab one frame from the webcam and return its classification."""
    global camera, interpreter, labels

    # Guard clauses: model loaded, camera open, frame captured.
    if interpreter is None or labels is None:
        return JSONResponse(status_code=400, content={'message': '模型或标签文件未加载'})

    if camera is None or not camera.isOpened():
        return JSONResponse(status_code=400, content={'message': '摄像头未打开'})

    ok, frame = camera.read()
    if not ok:
        return JSONResponse(status_code=400, content={'message': '摄像头未能捕获图像'})

    return process_frame(frame)


if __name__ == '__main__':
    # Serve the API directly when this file is executed as a script.
    import uvicorn

    uvicorn.run(app=app, host='0.0.0.0', port=18888)
