import numpy as np
import base64
import json
import os
import cv2
import logging
from threading import Thread, Event
import onnxruntime as ort
import time
import paho.mqtt.client as mqtt
from datetime import datetime
from paho.mqtt.client import CallbackAPIVersion
from picamera2 import Picamera2
import random

# Module-level logging: all messages in this file go through the "Video" logger.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("Video")

# MQTT configuration (EMQX broker)
# NOTE(review): broker address and credentials are hard-coded in source —
# move them to environment variables or a config file before deploying.
BROKER = "120.46.12.5"
PORT = 1883
TOPIC = "camera/encode_transfrom"  # "transfrom" spelling is part of the topic subscribers expect — do not "fix"
CLIENT_ID = "raspberry_camera"
USERNAME = "gll"
PASSWORD = "gll55555"

# MQTT callback functions (paho-mqtt CallbackAPIVersion.VERSION2 signatures)
def on_connect(client, userdata, flags, reason_code, properties):
    """Log the outcome of the MQTT connection attempt.

    VERSION2 callback: `reason_code` is a ReasonCode object that compares
    equal to 0 on success.
    """
    if reason_code == 0:
        logger.info("Connected to EMQX broker")
    else:
        # Lazy %-formatting: the message is only built if the level is enabled.
        logger.error("Failed to connect to EMQX, reason: %s", reason_code)

def on_publish(client, userdata, mid, reason_code, properties):
    """Log the broker's acknowledgement of a published message.

    VERSION2 callback: `mid` is the message id returned by client.publish().
    """
    # Lazy %-formatting: the message is only built if INFO is enabled.
    logger.info("Message %s published with reason code: %s", mid, reason_code)

# Module-level MQTT client: connects and starts a background network thread
# at import time, so publishes from the capture thread never block.
client = mqtt.Client(client_id=CLIENT_ID, callback_api_version=CallbackAPIVersion.VERSION2)
client.username_pw_set(username=USERNAME, password=PASSWORD)
client.on_connect = on_connect
client.on_publish = on_publish
client.connect(BROKER, PORT, 60)  # raises on network failure — no retry logic here
client.loop_start()  # background loop handles keepalives, acks and reconnects

def plot_one_box(x, img, color=None, label=None, line_thickness=None):
    """Draw one bounding box (with optional label) on `img` in place.

    x: box corners (x1, y1, x2, y2); img: BGR image, modified in place.
    color defaults to a random BGR triple; line thickness scales with image size.
    """
    thickness = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1
    color = color or [random.randint(0, 255) for _ in range(3)]
    top_left = (int(x[0]), int(x[1]))
    bottom_right = (int(x[2]), int(x[3]))
    cv2.rectangle(img, top_left, bottom_right, color, thickness=thickness, lineType=cv2.LINE_AA)
    if not label:
        return
    font_thickness = max(thickness - 1, 1)
    text_w, text_h = cv2.getTextSize(label, 0, fontScale=thickness / 3, thickness=font_thickness)[0]
    # Filled background rectangle sized to the label text, above the box.
    label_corner = (top_left[0] + text_w, top_left[1] - text_h - 3)
    cv2.rectangle(img, top_left, label_corner, color, -1, cv2.LINE_AA)
    cv2.putText(img, label, (top_left[0], top_left[1] - 2), 0, thickness / 3, [225, 255, 255],
                thickness=font_thickness, lineType=cv2.LINE_AA)

def _make_grid(nx, ny):
    """Return an (nx*ny, 2) float32 array of (x, y) cell offsets for a
    feature-map grid that is `nx` cells wide and `ny` cells tall, in
    row-major order (y outer, x inner).

    Fix: the np.meshgrid arguments were swapped (arange(ny), arange(nx)),
    which produced x offsets in [0, ny) — only correct because the model
    input is square (model_w == model_h). Identical output for square grids.
    """
    xv, yv = np.meshgrid(np.arange(nx), np.arange(ny))
    return np.stack((xv, yv), 2).reshape((-1, 2)).astype(np.float32)

def cal_outputs(outs, nl, na, model_w, model_h, anchor_grid, stride):
    """Decode raw YOLOv5 head outputs into absolute xywh in model-input pixels.

    outs: (num_preds, 5+num_classes) array, rows grouped per detection layer
    (na*h*w rows for each of the nl layers). Mutated in place and returned.

    Fix: `h` was derived from model_w and `w` from model_h (swapped); this
    was harmless only while the model input is square.
    """
    row_ind = 0
    grid = [np.zeros(1)] * nl
    for i in range(nl):
        # Feature-map height/width at this layer's stride.
        h, w = int(model_h / stride[i]), int(model_w / stride[i])
        length = int(na * h * w)
        # The zeros placeholder has no dims 2:4, so this always builds the grid.
        if grid[i].shape[2:4] != (h, w):
            grid[i] = _make_grid(w, h)
        # xy: sigmoid output decoded to a grid offset, then scaled by stride.
        outs[row_ind:row_ind + length, 0:2] = (outs[row_ind:row_ind + length, 0:2] * 2. - 0.5 + np.tile(
            grid[i], (na, 1))) * int(stride[i])
        # wh: squared sigmoid output scaled by this layer's anchors.
        outs[row_ind:row_ind + length, 2:4] = (outs[row_ind:row_ind + length, 2:4] * 2) ** 2 * np.repeat(
            anchor_grid[i], h * w, axis=0)
        row_ind += length
    return outs

def post_process_opencv(outputs, model_h, model_w, img_h, img_w, thred_nms, thred_cond):
    """Rescale decoded predictions from model space to image space, run NMS,
    and return (boxes, confidences, class_ids).

    outputs: (N, 5+num_classes) array of xywh + objectness + class scores.
    Returns three empty lists when NMS keeps nothing.
    """
    confidences = outputs[:, 4].tolist()
    # Centre and size mapped into original-image pixel coordinates.
    cx = outputs[:, 0] / model_w * img_w
    cy = outputs[:, 1] / model_h * img_h
    bw = outputs[:, 2] / model_w * img_w
    bh = outputs[:, 3] / model_h * img_h
    class_scores = outputs[:, 5:]
    if len(class_scores.shape) == 1:
        class_scores = np.expand_dims(class_scores, 1)
    class_ids = np.argmax(class_scores, axis=1)

    # Corner-format boxes stacked into an (N, 4) array.
    half_w = bw / 2
    half_h = bh / 2
    corner_boxes = np.stack((cx - half_w, cy - half_h, cx + half_w, cy + half_h), axis=-1)

    box_list = corner_boxes.tolist()
    # NOTE(review): cv2.dnn.NMSBoxes documents (x, y, w, h) rectangles; corner
    # boxes are passed here exactly as in the original — confirm against the
    # OpenCV docs before relying on the NMS IoU being exact.
    keep = cv2.dnn.NMSBoxes(box_list, confidences, thred_cond, thred_nms)
    if len(keep) == 0:
        return [], [], []
    return np.array(box_list)[keep], np.array(confidences)[keep], class_ids[keep]

def infer_img(img0, net, model_h, model_w, nl, na, stride, anchor_grid, thred_nms=0.4, thred_cond=0.5):
    """Run one YOLOv5 inference on a BGR frame.

    Returns (boxes, confidences, class_ids) in original-image coordinates.
    `net` is an onnxruntime InferenceSession with a single image input.
    """
    # Straight resize to the model input size (no letterboxing), BGR -> RGB.
    resized = cv2.resize(img0, [model_w, model_h], interpolation=cv2.INTER_AREA)
    rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
    normalized = rgb.astype(np.float32) / 255.0
    # HWC -> NCHW with a leading batch dimension of 1.
    blob = np.expand_dims(np.transpose(normalized, (2, 0, 1)), axis=0)

    input_name = net.get_inputs()[0].name
    raw = net.run(None, {input_name: blob})[0].squeeze(axis=0)
    decoded = cal_outputs(raw, nl, na, model_w, model_h, anchor_grid, stride)
    img_h, img_w, _ = np.shape(img0)
    return post_process_opencv(decoded, model_h, model_w, img_h, img_w, thred_nms, thred_cond)

class Video:
    """Capture Picamera2 frames, optionally run YOLOv5 (ONNX) detection,
    show a local preview window, and publish base64 JPEG frames over MQTT.

    Uses the module-level `client`, `logger` and detection helpers.
    """

    def __init__(self):
        self.cap = Picamera2()
        self.stop_event = Event()
        self.frame_count = 0
        self.fps_start = time.time()
        self.fps = 0.0
        # YOLOv5 configuration
        self.model_pb_path = "best.onnx"  # loaded from the working directory
        self.so = ort.SessionOptions()
        self.net = ort.InferenceSession(self.model_pb_path, self.so)
        self.dic_labels = {0: 'water'}
        self.model_h = 320
        self.model_w = 320
        self.nl = 3  # number of detection layers
        self.na = 3  # anchors per layer
        self.stride = [8., 16., 32.]
        self.anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
        self.anchor_grid = np.asarray(self.anchors, dtype=np.float32).reshape(self.nl, -1, 2)
        self.flag_det = False  # detection toggle, flipped by the 's' key in run()

    def setup(self):
        """Configure and start the camera; blocks ~1 s for sensor warm-up."""
        config = self.cap.create_video_configuration(
            main={"size": (320, 320)},
            controls={"FrameRate": 30.0}
        )
        self.cap.configure(config)
        self.cap.start()
        time.sleep(1)  # let auto-exposure/white-balance settle
        logger.info("Camera started")

    def calculate_fps(self):
        """Update self.fps roughly once per second from the frame counter."""
        self.frame_count += 1
        elapsed_time = time.time() - self.fps_start
        if elapsed_time >= 1.0:
            self.fps = self.frame_count / elapsed_time
            self.frame_count = 0
            self.fps_start = time.time()

    def stream_to_mqtt(self):
        """Worker loop: capture, optionally detect/annotate, preview, and
        publish frames as base64 JPEG JSON payloads until stop_event is set."""
        while not self.stop_event.is_set():
            frame = self.cap.capture_array("main")
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

            # YOLOv5 detection overlay
            if self.flag_det:
                t1 = time.time()
                det_boxes, scores, ids = infer_img(frame, self.net, self.model_h, self.model_w, self.nl, self.na,
                                                  self.stride, self.anchor_grid, thred_nms=0.4, thred_cond=0.1)
                t2 = time.time()
                # 'cls_id' instead of 'id' so the builtin is not shadowed.
                for box, score, cls_id in zip(det_boxes, scores, ids):
                    label = '%s:%.2f' % (self.dic_labels[cls_id], score)
                    plot_one_box(box.astype(np.int16), frame, color=(255, 0, 0), label=label, line_thickness=None)
                # Guard against division by zero on coarse clocks (t2 == t1).
                det_fps = 1.0 / (t2 - t1) if t2 > t1 else 0.0
                cv2.putText(frame, f"YOLO FPS: {det_fps:.1f}", (50, 50), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 2)

            # Camera FPS overlay
            cv2.putText(frame, f"Camera FPS: {self.fps:.1f}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 1)

            # Local preview window (after detection and drawing).
            # NOTE(review): imshow runs on this worker thread while waitKey
            # runs on the main thread; OpenCV GUI calls are not guaranteed
            # thread-safe on every platform — confirm on the target system.
            cv2.imshow("Local stream", frame)

            # Encode and publish over MQTT; skip the frame if encoding fails
            # (the original ignored the success flag and could publish junk).
            ok, buffer = cv2.imencode('.jpg', frame, [cv2.IMWRITE_JPEG_QUALITY, 80])
            if ok:
                image_base64 = base64.b64encode(buffer).decode('utf-8')
                payload = {"image": image_base64, "timestamp": datetime.now().strftime('%Y-%m-%dT%H:%M:%S')}
                json_payload = json.dumps(payload)
                client.publish(TOPIC, json_payload, qos=0)
            self.calculate_fps()
            time.sleep(0.066)  # caps publishing at ~15 fps although the camera runs at 30

    def run(self):
        """Start streaming; 's' toggles detection, 'q' or Ctrl-C stops."""
        self.setup()
        stream_thread = Thread(target=self.stream_to_mqtt, daemon=True)
        stream_thread.start()

        try:
            while not self.stop_event.is_set():
                key = cv2.waitKey(1)
                if key == ord('s'):
                    self.flag_det = not self.flag_det
                    logger.info(f"Detection: {self.flag_det}")
                    time.sleep(0.2)  # debounce the toggle key
                elif key == ord('q'):
                    self.stop_event.set()
                time.sleep(0.01)
        except KeyboardInterrupt:
            print("\nStopped by user")
        finally:
            self.stop_event.set()
            # Join the worker before stopping the camera so it cannot call
            # capture_array() on a stopped camera (race in the original).
            stream_thread.join(timeout=2.0)
            self.cap.stop()
            client.loop_stop()
            client.disconnect()
            cv2.destroyAllWindows()
            logger.info("Camera and MQTT stopped")

if __name__ == "__main__":
    # Entry point: build the pipeline and run until 'q' or Ctrl-C.
    Video().run()