import paho.mqtt.client as mqtt
from paho.mqtt.client import CallbackAPIVersion
import json
import time
import numpy as np
import onnxruntime as ort
import cv2
from picamera2 import Picamera2
import random
from datetime import datetime

# MQTT broker connection settings.
BROKER = "120.46.12.5"   # broker host (public IP)
PORT = 1883              # standard unencrypted MQTT port
TOPIC = "yolov5/data"    # topic detection results are published to
CLIENT_ID = "pi"         # this device's MQTT client id
USERNAME = "gll"         # broker credentials
PASSWORD = "gll55555"

# MQTT connect callback (paho-mqtt CallbackAPIVersion.VERSION2 signature).
def on_connect(client, userdata, flags, reason_code, properties=None):
    """Log the outcome of the MQTT connection attempt.

    The client below is created with CallbackAPIVersion.VERSION2, whose
    on_connect callback is invoked with five arguments:
    (client, userdata, flags, reason_code, properties).  The previous
    four-argument signature raised a TypeError as soon as the broker
    answered.  `properties` defaults to None so the function also stays
    callable with four arguments.
    """
    # paho's ReasonCode compares equal to its integer value; 0 == success.
    if reason_code == 0:
        print("连接mqtt成功")
    else:
        print(f"连接mqtt失败,{reason_code}")

# Build the MQTT client (paho-mqtt 2.x API), authenticate, and connect.
client = mqtt.Client(client_id=CLIENT_ID, callback_api_version=CallbackAPIVersion.VERSION2)
client.username_pw_set(username=USERNAME, password=PASSWORD)
client.on_connect = on_connect
client.connect(BROKER, PORT, 60)
# Handle MQTT network traffic asynchronously on a background thread.
client.loop_start()


# YOLOv5-Lite drawing helper.
def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """Draw a single bounding box, and optionally a label, onto img in place.

    x is the box as (x1, y1, x2, y2); color is a BGR triple (random when
    falsy); label text is rendered on a filled background above the
    top-left corner.
    """
    # Scale line width with image size when the caller gives none.
    tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
    color = color or [random.randint(0, 255) for _ in range(3)]
    top_left = (int(x[0]), int(x[1]))
    bottom_right = (int(x[2]), int(x[3]))
    cv2.rectangle(img, top_left, bottom_right, color, thickness=tl, lineType=cv2.LINE_AA)
    if label:
        tf = max(tl - 1, 1)
        text_w, text_h = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]
        # Filled rectangle behind the label so the text stays readable.
        bg_corner = (top_left[0] + text_w, top_left[1] - text_h - 3)
        cv2.rectangle(img, top_left, bg_corner, color, -1, cv2.LINE_AA)
        cv2.putText(img, label, (top_left[0], top_left[1] - 2), 0, tl / 3,
                    [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA)

# YOLOv5-Lite 网格生成函数
def _make_grid(nx, ny):
    xv, yv = np.meshgrid(np.arange(ny), np.arange(nx))
    return np.stack((xv, yv), 2).reshape((-1, 2)).astype(np.float32)

# YOLOv5-Lite raw-output decoding.
def cal_outputs(outs, nl, na, model_w, model_h, anchor_grid, stride):
    """Decode raw head predictions into model-space boxes, in place.

    Rows of `outs` are grouped per detection layer (na * h * w rows each).
    Columns 0:2 become center coordinates and columns 2:4 become
    width/height, both in model-input pixels.  Returns `outs`.

    Fix: the original kept a per-layer grid "cache" guarded by
    `if grid[i].shape[2:4] != (h, w)`, but grid entries start as 1-D
    zeros and the generated grids are (N, 2), so `shape[2:4]` could never
    equal (h, w) — the condition was always true and the cache never hit.
    The dead check is removed and the grid is built directly (inlined
    from _make_grid, identical values).
    """
    offset = 0
    for layer in range(nl):
        step = int(stride[layer])
        # Feature-map size at this layer (model input is square in practice).
        h = int(model_w / stride[layer])
        w = int(model_h / stride[layer])
        count = int(na * h * w)
        # (x, y) cell offsets; row k is (k % h, k // h), same as _make_grid(w, h).
        flat = np.arange(w * h)
        cell_grid = np.stack((flat % h, flat // h), axis=1).astype(np.float32)
        rows = slice(offset, offset + count)
        # Center decode: (2*p - 0.5 + cell) * stride.
        outs[rows, 0:2] = (outs[rows, 0:2] * 2. - 0.5 + np.tile(cell_grid, (na, 1))) * step
        # Size decode: (2*p)^2 scaled by the matching anchor box.
        outs[rows, 2:4] = (outs[rows, 2:4] * 2) ** 2 * np.repeat(anchor_grid[layer], h * w, axis=0)
        offset += count
    return outs

# YOLOv5-Lite post-processing.
def post_process_opencv(outputs, model_h, model_w, img_h, img_w, thred_nms, thred_cond):
    """Scale decoded boxes to the original image and run OpenCV NMS.

    Returns (boxes, confidences, class_ids) for the kept detections, or
    three empty lists when NMS keeps nothing.
    """
    conf = outputs[:, 4].tolist()
    # Map centers/sizes from model space to original-image space.
    c_x = outputs[:, 0] / model_w * img_w
    c_y = outputs[:, 1] / model_h * img_h
    w = outputs[:, 2] / model_w * img_w
    h = outputs[:, 3] / model_h * img_h
    p_cls = outputs[:, 5:]
    if p_cls.ndim == 1:
        p_cls = np.expand_dims(p_cls, 1)
    cls_id = np.argmax(p_cls, axis=1)

    # Corner-format boxes stacked as (N, 4): x1, y1, x2, y2.
    half_w = w / 2
    half_h = h / 2
    areas = np.stack((c_x - half_w, c_y - half_h, c_x + half_w, c_y + half_h), axis=-1)
    areas = areas.tolist()

    ids = cv2.dnn.NMSBoxes(areas, conf, thred_cond, thred_nms)
    if len(ids) == 0:
        return [], [], []
    return np.array(areas)[ids], np.array(conf)[ids], cls_id[ids]

# YOLOv5-Lite single-image inference.
def infer_img(img0, net, model_h, model_w, nl, na, stride, anchor_grid, thred_nms=0.4, thred_cond=0.5):
    """Run the ONNX session on one BGR frame and return (boxes, confs, ids)."""
    # Preprocess: resize to model input, BGR -> RGB, scale to [0, 1], NCHW blob.
    resized = cv2.resize(img0, [model_w, model_h], interpolation=cv2.INTER_AREA)
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    normed = rgb.astype(np.float32) / 255.0
    blob = np.expand_dims(np.transpose(normed, (2, 0, 1)), axis=0)

    # Forward pass, then decode raw predictions into model-space boxes.
    input_name = net.get_inputs()[0].name
    raw = net.run(None, {input_name: blob})[0].squeeze(axis=0)
    decoded = cal_outputs(raw, nl, na, model_w, model_h, anchor_grid, stride)

    # Scale to the original frame and apply NMS.
    img_h, img_w, _ = np.shape(img0)
    return post_process_opencv(decoded, model_h, model_w, img_h, img_w, thred_nms, thred_cond)

if __name__ == "__main__":
    # --- YOLOv5-Lite model configuration ---
    model_pb_path = "best.onnx"
    so = ort.SessionOptions()
    net = ort.InferenceSession(model_pb_path, so)
    dic_labels = {0: 'water'}  # class-id -> label name
    model_h = 320              # model input height
    model_w = 320              # model input width
    nl = 3                     # number of detection layers
    na = 3                     # anchors per layer
    stride = [8., 16., 32.]
    anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
    anchor_grid = np.asarray(anchors, dtype=np.float32).reshape(nl, -1, 2)

    # --- PiCamera2 setup: 320x320 stream at 30 fps ---
    picam2 = Picamera2()
    picam2.configure(picam2.create_video_configuration(
        main={"size": (320, 320)},
        controls={"FrameRate": 30.0}
    ))
    picam2.start()
    time.sleep(1)  # let the sensor settle before grabbing frames

    flag_det = False  # detection is toggled with the 's' key
    fps_start = time.time()
    frame_count = 0
    fps = 0.0

    try:
        while True:
            # PiCamera2 delivers RGB frames; OpenCV drawing works in BGR.
            array = picam2.capture_array("main")
            img0 = cv2.cvtColor(array, cv2.COLOR_RGB2BGR)

            # Camera FPS, refreshed roughly once per second.
            frame_count += 1
            elapsed_time = time.time() - fps_start
            if elapsed_time >= 1.0:
                fps = frame_count / elapsed_time
                frame_count = 0
                fps_start = time.time()

            detection_results = []
            if flag_det:
                t1 = time.time()
                det_boxes, scores, ids = infer_img(img0, net, model_h, model_w, nl, na,
                                                   stride, anchor_grid, thred_nms=0.4, thred_cond=0.1)
                t2 = time.time()

                # Collect detections for MQTT and draw them on the frame.
                # 'cls' instead of 'id' avoids shadowing the builtin id().
                for box, score, cls in zip(det_boxes, scores, ids):
                    label = dic_labels[cls]
                    detection = {
                        "class": label,
                        "confidence": float(score),
                        "box": [float(box[0]), float(box[1]), float(box[2]), float(box[3])]
                    }
                    detection_results.append(detection)

                    label_text = '%s:%.2f' % (label, score)
                    plot_one_box(box.astype(np.int16), img0, color=(255, 0, 0),
                                 label=label_text, line_thickness=None)

                # Inference FPS overlay; max() guards against a zero interval
                # (coarse clocks could otherwise raise ZeroDivisionError).
                str_FPS = "FPS: %.2f" % (1. / max(t2 - t1, 1e-6))
                cv2.putText(img0, str_FPS, (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 3)

            # Publish detection results over MQTT (only when something was found).
            if detection_results:
                payload = {
                    # UTC timestamp; time.gmtime() replaces the deprecated
                    # datetime.utcfromtimestamp (deprecated since Python 3.12),
                    # producing an identical string.
                    "timestamp": time.strftime('%Y%m%d_%H%M%S', time.gmtime()),
                    "detections": detection_results
                }
                payload_json = json.dumps(payload)
                client.publish(TOPIC, payload_json, qos=1)
                print(f"已发送数据: {payload_json}")

            cv2.putText(img0, f"Camera FPS: {fps:.1f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)
            cv2.imshow("video", img0)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('q'):    # quit
                break
            elif key == ord('s'):  # toggle detection on/off
                flag_det = not flag_det
                print(f"Detection: {flag_det}")

    except KeyboardInterrupt:
        print("\nStopped by user")
    finally:
        # Always release the camera, GUI windows, and the MQTT connection.
        picam2.stop()
        cv2.destroyAllWindows()
        client.loop_stop()
        client.disconnect()
        print("Camera, windows, and MQTT closed")
