import os
import queue
import threading
import base64
import zipfile
from io import BytesIO
import io
import logging
import time

import cv2
import numpy as np
from PIL import Image, ImageOps
from tflite_runtime.interpreter import Interpreter
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse, StreamingResponse
from fastapi import FastAPI, WebSocket, WebSocketDisconnect, File, UploadFile, HTTPException, Request

from apps import led, servo, motor

app = FastAPI()

# Register the hardware routers.
app.include_router(led.router)      # LED light
app.include_router(servo.router)    # servo
app.include_router(motor.router)    # DC motor

# Configure logging: write to a file and mirror to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
    handlers=[
        logging.FileHandler("app.log"),  # log to file
        logging.StreamHandler()          # log to console
    ]
)
logger = logging.getLogger("FastAPI-Logger")

# Allow cross-origin access.
# NOTE(review): allow_origins=["*"] together with allow_credentials=True is
# non-standard CORS; Starlette copes by echoing the request origin, but
# consider restricting origins in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # "*" accepts requests from any origin
    allow_methods=["*"],
    allow_headers=["*"],
    allow_credentials=True,
    # expose_headers expects a sequence of header names; a set has no
    # deterministic order when joined into the response header, so use a list.
    expose_headers=["Content-Disposition"],
)
# --- Global state -----------------------------------------------------------
camera = None  # cv2.VideoCapture handle; None while the camera is closed
camera_number = 9  # 9 on the Taishan Pi board; a default webcam is usually 0
model = None
labels = None  # list of label strings loaded from labels.txt

latest_frame = None
capture_thread = None  # thread that captures camera frames (see the commented-out open_camera variant)
capture_event = threading.Event()  # set this event to stop the capture thread
# Queue used to hand frames from the capture thread to consumers
frame_queue = queue.Queue()

# Global state for the loaded TFLite model
interpreter = None
input_details = None
output_details = None

# GStreamer pipeline reading MJPEG from /dev/video<camera_number>,
# decoding to raw frames for OpenCV's appsink (640x480 @ 15 fps).
gst_pipeline = (
    f"v4l2src device=/dev/video{camera_number} ! "
    "image/jpeg, width=640, height=480, framerate=15/1 ! "
    "jpegdec ! videoconvert ! appsink"
)

# Disabled request-logging middleware.  NOTE(review): as originally written it
# was broken — an HTTP middleware must be `async def` (it uses `await`) and
# must return the response.  Corrected form kept here for when it is re-enabled:
# @app.middleware("http")
# async def log_requests(request: Request, call_next):
#     """Logging middleware that records every request."""
#     start_time = time.time()
#     logger.info(f"Start request: {request.method} {request.url}")
#
#     response = await call_next(request)
#
#     process_time = time.time() - start_time
#     logger.info(f"Request done: {request.method} {request.url} "
#                 f"status: {response.status_code} took: {process_time:.3f}s")
#     return response

def init():
    """Reset camera and model state, releasing the camera if it is open."""
    global camera, model, labels
    if camera is not None and camera.isOpened():
        camera.release()
    camera = model = labels = None


@app.on_event("shutdown")
def shutdown():
    """FastAPI shutdown hook: make sure the camera is released on exit."""
    global camera
    cam = camera
    if cam is not None and cam.isOpened():
        cam.release()


@app.get("/", summary='测试连接是否可用')
def ping():
    """Health-check endpoint: always answers with a constant payload."""
    response = {'message': 'pong'}
    return response


@app.post("/upload_model/", summary='上传模型的zip压缩包', description='要求有keras_model.tflite和labels.txt文件')
async def upload_model(file: UploadFile = File(...)):
    """Accept a ZIP upload and extract the model and label files into ./model.

    The archive must contain ``keras_model.tflite`` and ``labels.txt`` at its
    root.  Only those two members are extracted (overwriting any previous
    copies), so other archive entries can never escape the target directory.

    Returns 400 for a non-zip upload or a zip missing the required files,
    500 if extraction fails.
    """
    # file.filename can be None, so guard before calling endswith;
    # accept ".ZIP" as well as ".zip".
    if not file.filename or not file.filename.lower().endswith(".zip"):
        message = '必须上传.zip文件'
        return JSONResponse(status_code=400, content={'message': message})

    # Directory that receives the extracted files
    upload_dir = './model'
    try:
        # Make sure the target directory exists.
        os.makedirs(upload_dir, exist_ok=True)

        # The upload is small enough to keep in memory: read it fully and
        # open it as an in-memory ZIP stream.
        contents = await file.read()
        with zipfile.ZipFile(BytesIO(contents)) as zip_ref:
            names = zip_ref.namelist()
            if 'labels.txt' not in names or 'keras_model.tflite' not in names:
                message = '压缩包中必须有 keras_model.tflite 和 labels.txt 文件'
                return JSONResponse(status_code=400, content={'message': message})
            # Extract (and overwrite) only the two expected members.
            zip_ref.extract('labels.txt', path=upload_dir)
            zip_ref.extract('keras_model.tflite', path=upload_dir)

    except Exception as e:
        # Log the full traceback instead of discarding it.
        logger.exception("Model upload failed")
        return JSONResponse(content={"error": str(e)}, status_code=500)

    return {"message": "模型和标签文件上传并加载成功"}


@app.get("/open_camera/", summary='打开摄像头')
def open_camera():
    """Open the camera via the GStreamer pipeline; idempotent if already open."""
    global camera, camera_number

    # Already open: nothing to do.
    if camera is not None and camera.isOpened():
        return {"message": "摄像头已成功打开"}

    # Open through the GStreamer backend (the Taishan Pi camera is /dev/video9).
    camera = cv2.VideoCapture(gst_pipeline, cv2.CAP_GSTREAMER)
    if camera.isOpened():
        return {"message": "摄像头已成功打开"}

    return JSONResponse(status_code=400, content={'message': '无法打开摄像头'})
# @app.get("/open_camera/", summary='打开摄像头')
# def open_camera():
#     """打开摄像头"""
#     global camera, camera_number, latest_frame, capture_thread, capture_event
#     if camera is not None and camera.isOpened():
#         return {"message": "摄像头已成功打开"}
#
#     camera = cv2.VideoCapture(gst_pipeline, cv2.CAP_GSTREAMER)
#     if not camera.isOpened():
#         message = '无法打开摄像头'
#         return JSONResponse(status_code=400, content={'message': message})
#
#     # 重置停止标志
#     capture_event.clear()  # 清除事件，使线程继续运行
#
#     # 定义线程的任务
#     def capture_frames():
#         while camera.isOpened() and not capture_event.is_set():  # 当事件未被触发时，线程持续捕获帧
#             ret, frame = camera.read()
#             if ret:
#                 # 将捕获的帧放入队列
#                 frame_queue.put(frame)
#
#     # 创建并启动线程
#     capture_thread = threading.Thread(target=capture_frames, daemon=True)
#     capture_thread.start()
#
#     return {"message": "摄像头已成功打开"}


@app.get("/close_camera/", summary='关闭摄像头')
def close_camera():
    """Release the camera and clear model state; safe to call when closed."""
    global camera, model, labels

    # Only release when a live camera handle exists.
    if camera is not None and camera.isOpened():
        camera.release()
        camera = model = labels = None

    return {"message": "摄像头已成功关闭"}
# @app.get("/close_camera/", summary='关闭摄像头')
# def close_camera():
#     """关闭摄像头"""
#     global camera, capture_thread, capture_event, model, labels
#     if camera is None or not camera.isOpened():
#         return {"message": "摄像头已成功关闭"}
#
#     # 设置事件，停止线程
#     capture_event.set()
#
#     # 等待线程完全停止
#     if capture_thread is not None:
#         capture_thread.join()
#
#     # 释放摄像头资源
#     camera.release()
#     camera, model, labels = None, None, None
#     return {"message": "摄像头已成功关闭"}


def process_frame(image):
    """Classify one OpenCV BGR frame with the loaded TFLite model.

    Assumes load_model_files() has already populated the module-level
    interpreter/input_details/output_details/labels, and that the model takes
    a 224x224 RGB image normalized to [-1, 1] — TODO confirm for other models.

    Returns {"class": <label>, "confidence": <integer percent as a string>}.
    """
    global interpreter, labels

    # OpenCV frames are BGR; convert to an RGB PIL image.
    image = Image.fromarray(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)).convert("RGB")

    # Resize/center-crop to the model's expected input size.
    size = (224, 224)
    image = ImageOps.fit(image, size, Image.LANCZOS)

    # Normalize pixel values to [-1, 1] and add the batch dimension.
    image_array = np.asarray(image)
    normalized_image_array = (image_array.astype(np.float32) / 127.5) - 1
    input_data = np.expand_dims(normalized_image_array, axis=0).astype(np.float32)

    # Run inference.
    interpreter.set_tensor(input_details[0]['index'], input_data)
    interpreter.invoke()
    output_data = interpreter.get_tensor(output_details[0]['index'])

    # Index of the best-scoring class.
    index = int(np.argmax(output_data))

    # Labels look like "0 apple": drop the leading index when present,
    # but tolerate labels without a space instead of raising IndexError.
    parts = labels[index].split(' ', 1)
    class_name = parts[1] if len(parts) > 1 else parts[0]

    confidence_score = output_data[0][index]
    # Integer percent as a string, e.g. "95" — same value the old
    # str(np.round(...))[:-2] slicing trick produced, without the slicing.
    confidence_score_rate = str(int(np.round(confidence_score * 100)))

    # Use the configured logger rather than print (consistent with the rest
    # of the app's logging setup).
    logger.info("predict: %s %s", class_name, confidence_score_rate)
    return {"class": class_name, "confidence": confidence_score_rate}


@app.get("/load_model/", summary='加载模型')
def load_model_files():
    """Load the TFLite model and label file from ./model.

    Populates the module-level interpreter/input_details/output_details/labels
    consumed by process_frame.  Returns a 400 JSON response when a file is
    missing or the model cannot be parsed.
    """
    global interpreter, input_details, output_details, labels

    # Single source of truth for the paths (the original mixed
    # "./model/..." and "model/..." spellings).
    model_path = "./model/keras_model.tflite"
    labels_path = "./model/labels.txt"

    # Verify both files exist before touching the interpreter.
    if not os.path.exists(model_path):
        message = '模型文件 keras_model.tflite 不存在'
        return JSONResponse(status_code=400, content={'message': message})
    if not os.path.exists(labels_path):
        message = '标签文件 labels.txt 不存在'
        return JSONResponse(status_code=400, content={'message': message})

    try:
        interpreter = Interpreter(model_path=model_path)
    except Exception:
        # Log the real cause instead of silently discarding it.
        logger.exception("Failed to load TFLite model from %s", model_path)
        message = '加载失败：错误的模型'
        return JSONResponse(status_code=400, content={'message': message})

    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()

    # One label per line, e.g. "0 apple".
    with open(labels_path, "r", encoding="utf-8") as file:
        labels = [line.strip() for line in file]

    return {"message": "模型和标签文件加载成功"}


# @app.get("/capture_photo", summary='使用摄像头拍照，返回照片的二进制')
# def capture_photo():
#     global camera, camera_number
#     # if camera is None or not camera.isOpened():
#     #     message = '摄像头未打开'
#     #     return JSONResponse(status_code=400, content={'message': message})
#
#     # 拍照, 摄像头有延迟，因此每次需要先打开摄像头拍照，再关闭摄像头
#     camera_tmp = cv2.VideoCapture(camera_number)  # 打开摄像头, 泰山派摄像头是9
#     if not camera_tmp.isOpened():
#         message = '无法打开摄像头'
#         return JSONResponse(status_code=400, content={'message': message})
#     ret, frame = camera_tmp.read()
#     if not ret:
#         message = '摄像头未能捕获图像'
#         return JSONResponse(status_code=400, content={'message': message})
#     camera_tmp.release()
#
#     # 将图片从 OpenCV 格式转换为 JPEG 格式
#     _, img_encoded = cv2.imencode('.jpg', frame)
#
#     # 转换为字节流
#     img_bytes = img_encoded.tobytes()
#
#     # 返回图片作为流
#     return StreamingResponse(io.BytesIO(img_bytes), media_type="image/jpeg")


@app.get("/predict", summary='进行分类检测')
def predict():
    """Capture a frame from the camera, classify it, and return the result.

    Returns a dict with ``class``, ``confidence`` and a base64-encoded JPEG
    of the analyzed frame, or a 400 JSON response when the model/camera is
    not ready or no frame can be captured.
    """
    global interpreter, labels, camera

    if interpreter is None or labels is None:
        message = '模型或标签文件未加载'
        return JSONResponse(status_code=400, content={'message': message})

    if not camera or not camera.isOpened():
        message = '摄像头未打开'
        return JSONResponse(status_code=400, content={'message': message})

    # BUGFIX: the queue-based capture thread is disabled (the threaded
    # open_camera variant is commented out), so frame_queue was never filled
    # and this endpoint always returned 400.  Read straight from the camera
    # instead: grab() first to drop a stale buffered frame so we classify the
    # latest image.
    camera.grab()
    ret, frame = camera.read()
    if not ret:
        message = '摄像头未能捕获图像'
        return JSONResponse(status_code=400, content={'message': message})

    result = process_frame(frame)

    # Encode the analyzed frame as JPEG and attach it as a base64 string.
    _, img_encoded = cv2.imencode('.jpg', frame)
    img_base64 = base64.b64encode(img_encoded.tobytes()).decode('utf-8')
    result['image'] = img_base64

    return result


@app.post("/predict_img", summary='对图片进行分类检测')
async def predict_img(file: UploadFile = File(...)):
    """Classify an uploaded image with the loaded model."""
    global interpreter, labels

    # The model and labels must be loaded first.
    if interpreter is None or labels is None:
        return JSONResponse(status_code=400, content={'message': '模型或标签文件未加载'})

    # Decode the upload into an OpenCV BGR frame.
    raw = await file.read()
    frame = cv2.imdecode(np.frombuffer(raw, np.uint8), cv2.IMREAD_COLOR)
    if frame is None:
        return JSONResponse(status_code=400, content={'message': '无法读取上传的图像'})

    # Delegate the actual classification.
    return process_frame(frame)

def generate_video_stream(camera_tmp):
    """Yield multipart MJPEG chunks from *camera_tmp* until a read fails."""
    part_header = b"--frame\r\nContent-Type: image/jpeg\r\n\r\n"
    while True:
        ok, frame = camera_tmp.read()
        if not ok:
            break
        # JPEG-encode the frame and emit it as one multipart part.
        _, jpeg = cv2.imencode(".jpg", frame)
        yield part_header + bytearray(jpeg) + b"\r\n"


@app.get("/video-stream")
def video_stream():
    """Stream the live camera feed as multipart MJPEG."""
    global camera
    if camera and camera.isOpened():
        return StreamingResponse(
            generate_video_stream(camera),
            media_type="multipart/x-mixed-replace; boundary=frame",
        )
    return JSONResponse(status_code=400, content={'message': '摄像头未打开'})


if __name__ == '__main__':
    import uvicorn

    # Development entry point: bind on all interfaces, port 18888.
    uvicorn.run(app, host='0.0.0.0', port=18888)
