import cv2
import threading
import time
import numpy as np
from flask import Flask, Response, render_template, request, jsonify
import os
from enum import Enum
import RivsInference

class COCOLabels(Enum):
    """COCO 80-class label set; the member value is the class index
    emitted by the detector head (0 = person ... 79 = toothbrush).

    NOTE(review): spellings follow the darknet-style list (`motorbike`,
    `aeroplane`, `sofa`, `tvmonitor`) rather than the official COCO
    names — callers matching by name must use these exact strings.
    """
    person = 0
    bicycle = 1
    car = 2
    motorbike = 3
    aeroplane = 4
    bus = 5
    train = 6
    truck = 7
    boat = 8
    traffic_light = 9
    fire_hydrant = 10
    stop_sign = 11
    parking_meter = 12
    bench = 13
    bird = 14
    cat = 15
    dog = 16
    horse = 17
    sheep = 18
    cow = 19
    elephant = 20
    bear = 21
    zebra = 22
    giraffe = 23
    backpack = 24
    umbrella = 25
    handbag = 26
    tie = 27
    suitcase = 28
    frisbee = 29
    skis = 30
    snowboard = 31
    sports_ball = 32
    kite = 33
    baseball_bat = 34
    baseball_glove = 35
    skateboard = 36
    surfboard = 37
    tennis_racket = 38
    bottle = 39
    wine_glass = 40
    cup = 41
    fork = 42
    knife = 43
    spoon = 44
    bowl = 45
    banana = 46
    apple = 47
    sandwich = 48
    orange = 49
    broccoli = 50
    carrot = 51
    hot_dog = 52
    pizza = 53
    donut = 54
    cake = 55
    chair = 56
    sofa = 57
    pottedplant = 58
    bed = 59
    diningtable = 60
    toilet = 61
    tvmonitor = 62
    laptop = 63
    mouse = 64
    remote = 65
    keyboard = 66
    cell_phone = 67
    microwave = 68
    oven = 69
    toaster = 70
    sink = 71
    refrigerator = 72
    book = 73
    clock = 74
    vase = 75
    scissors = 76
    teddy_bear = 77
    hair_drier = 78
    toothbrush = 79


class RTSPStream:
    """Background reader for a video source opened by cv2.VideoCapture.

    A daemon thread keeps grabbing frames so that read() always returns
    the most recent frame without blocking on the decoder. On read
    failure the capture is released and reopened after a short pause.
    """

    def __init__(self, rtsp_url):
        self.rtsp_url = rtsp_url
        self.frame = None                 # latest decoded frame (or None)
        self.lock = threading.Lock()      # guards self.frame
        self.stopped = False              # set True to end the capture loop
        self.thread = None                # capture thread, created by start()

    def start(self):
        """Spawn the capture thread; returns self so calls can chain."""
        self.thread = threading.Thread(target=self._capture_frames, daemon=True)
        self.thread.start()
        return self

    def _capture_frames(self):
        """Capture loop: keep self.frame updated until stop() is called."""
        cap = cv2.VideoCapture(self.rtsp_url)

        if not cap.isOpened():
            print(f"无法打开RTSP流: {self.rtsp_url}")
            self.stopped = True
            return

        while not self.stopped:
            ok, grabbed = cap.read()
            if not ok:
                # Decoder stalled or stream dropped: reopen after a pause.
                print("无法获取帧，尝试重新连接...")
                cap.release()
                time.sleep(1)
                cap = cv2.VideoCapture(self.rtsp_url)
                continue

            with self.lock:
                self.frame = grabbed

            # Small yield so readers can take the lock.
            time.sleep(0.01)

        cap.release()

    def read(self):
        """Return a copy of the newest frame, or None if nothing arrived yet."""
        with self.lock:
            if self.frame is None:
                return None
            return self.frame.copy()

    def stop(self):
        """Signal the capture loop to exit and wait briefly for the thread."""
        self.stopped = True
        if self.thread is not None:
            self.thread.join(timeout=2.0)

class YOLOXDetector:
    """YOLOX object detector backed by a RivsInference engine.

    Loads (or builds and caches to ``<onnx>.exec``) an executable engine
    from an ONNX file, then provides preprocessing, batched inference,
    YOLOX grid decoding + NMS postprocessing, and box drawing.
    """

    def __init__(self, onnx_file='yolox_s.onnx', device=None, conf_thres=0.3, iou_thres=0.45):
        # Device / engine preparation.
        card_id = 0
        cluster_ids = [0]

        engine_path = os.path.join(os.path.dirname(onnx_file), os.path.basename(onnx_file) + ".exec")
        print("input onnx_file: ", onnx_file, ", engine_path: ", engine_path)
        self.handler = RivsInference.set_device(card_id, cluster_ids)

        parser = RivsInference.create_parser(RivsInference.ONNX_MODEL)
        # YOLOX exports commonly name the tensors images/output; kept
        # identical to yolov5_web.py for consistency.
        parser.set_input_names(['images'])
        parser.set_output_names(['output'])
        parser.set_input_shapes(['1,3,640,640'])
        parser.set_input_dtypes(RivsInference.DT_FLOAT32)

        if not os.path.exists(engine_path):
            # First run: build the engine from ONNX and cache it on disk.
            network = parser.read(onnx_file)
            optimizer = RivsInference.create_optimizer()
            optimizer.set_build_flag(RivsInference.KFP16_MIX)
            self.engine = optimizer.build(network)
            self.engine.save_executable(engine_path)
        else:
            self.engine = RivsInference.load(engine_path)

        assert self.engine is not None

        self.name = "yolox"
        self.device = device
        print(f"模型运行在设备: {device}")

        # Network input size (letterbox target).
        self.normH = 640
        self.normW = 640
        self.im_scale = None  # scale used by the most recent preprocess() call

        # Thresholds.
        self.conf_thres = conf_thres
        self.iou_thres = iou_thres

        # COCO class-id -> name mapping.
        self.class_names = {item.value: item.name for item in COCOLabels}
        print(f"已加载 {len(self.class_names)} 个类别")

        self.colors = [[np.random.randint(0, 255) for _ in range(3)] for _ in self.class_names]
        self.display_classes = list(range(len(self.class_names)))
        # Cached YOLOX decode grids (built lazily by _ensure_grids).
        self._grids_cached = None  # (num, 2)
        self._strides_cached = None  # (num, 1)
        self._decode_needed = True  # whether the raw output needs grid decoding

    def destroy(self):
        """Release the inference device handle."""
        RivsInference.release_device(self.handler)

    def detect_images(self, images):
        """Run batched inference.

        Args:
            images: list of HWC BGR uint8 numpy images.
        Returns:
            One detection list per input image (see postprocess).
        """
        inputs = []
        # Record each image's letterbox scale so mixed-size batches are
        # restored with the correct per-image factor (self.im_scale alone
        # would only hold the last image's scale).
        scales = []

        for image in images:
            processed_img = self.preprocess(image)
            scales.append(self.im_scale)
            processed_img = np.around(processed_img, decimals=5)
            inputs.append(processed_img)

        py_future = self.engine.run_with_batch(
            sample_nums=len(images),
            input_list=np.array([inputs]),
            buffer_type=RivsInference.TIF_ENGINE_RSC_IN_HOST_OUT_HOST,
        )

        pred = py_future.get()
        pred = np.asarray(pred)
        # Accept several export shapes: (N, num, 85) / (N, 1, num, 85) / (1, num, 85)
        while pred.ndim > 3:
            pred = pred[0]
        if pred.ndim == 2:
            pred = pred[None, ...]
        outputs = pred  # shape: (N, num, 85)

        results = []
        for index, image in enumerate(images):
            image_shape = image.shape
            result = self.postprocess(outputs[index], image_shape[1], image_shape[0],
                                      scale=scales[index])
            results.append(result)

        return results

    def preprocess(self, img):
        """Letterbox an HWC BGR image to (1, 3, normH, normW) float32 RGB.

        Follows the YOLOX reference: aspect-preserving resize, pad with
        114 on the right/bottom (top-left aligned), pixel range kept at
        0..255 (no /255). Also records the scale in self.im_scale.
        """
        assert isinstance(img, np.ndarray), "input image data type should be numpy.ndarray"
        assert img.shape[0] > 0 and img.shape[1] > 0, "input image shape error"

        ih, iw = img.shape[0], img.shape[1]
        self.im_scale = min(self.normW / iw, self.normH / ih)
        new_w = int(iw * self.im_scale)
        new_h = int(ih * self.im_scale)

        resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR).astype(np.uint8)
        padded = np.ones((self.normH, self.normW, 3), dtype=np.uint8) * 114
        padded[:new_h, :new_w] = resized

        # HWC(BGR) -> CHW, then reverse the channel axis for BGR->RGB.
        padded = padded.transpose((2, 0, 1))[::-1]
        padded = np.ascontiguousarray(padded).astype(np.float32)
        if len(padded.shape) == 3:
            padded = padded[None]
        return padded

    def xywh2xyxy(self, x):
        """Convert (cx, cy, w, h) rows to (x1, y1, x2, y2)."""
        y = np.copy(x)
        y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
        y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
        y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
        y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
        return y

    def nms(self, boxes, scores):
        """Greedy class-agnostic NMS; returns kept indices or None on bad input."""
        if scores.shape[0] < 1 or boxes.shape[0] != scores.shape[0]:
            return None
        order = scores.argsort()[::-1]
        x1 = boxes[:, 0]
        y1 = boxes[:, 1]
        x2 = boxes[:, 2]
        y2 = boxes[:, 3]
        area = (y2 - y1 + 1) * (x2 - x1 + 1)
        boxes_left = []
        while len(order) > 0:
            i = order[0]
            boxes_left.append(i)
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])
            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            iou = inter / (area[i] + area[order[1:]] - inter)
            ids = np.where(iou <= self.iou_thres)[0]
            order = order[ids + 1]
        return boxes_left

    def _ensure_grids(self):
        """Build (and cache) the YOLOX anchor grids used for decoding."""
        if self._grids_cached is not None and self._strides_cached is not None:
            return self._grids_cached, self._strides_cached

        strides = [8, 16, 32]
        grids = []
        strides_list = []

        for s in strides:
            ny, nx = self.normH // s, self.normW // s
            yv, xv = np.meshgrid(np.arange(ny), np.arange(nx), indexing='ij')
            grid = np.stack((xv, yv), axis=-1).reshape(-1, 2).astype(np.float32)
            grids.append(grid)
            strides_list.append(np.full((grid.shape[0], 1), s, dtype=np.float32))

        self._grids_cached = np.concatenate(grids, axis=0)
        self._strides_cached = np.concatenate(strides_list, axis=0)
        return self._grids_cached, self._strides_cached

    def postprocess(self, buffer, img_width, img_height, scale=None):
        """Decode, filter, and NMS raw network output for one image.

        Args:
            buffer: (num_boxes, 85) rows of [cx, cy, w, h, obj, cls_scores...].
            img_width/img_height: original image size for coordinate restore.
            scale: letterbox scale used for this image; falls back to the
                last preprocess() scale when omitted (backward compatible).
        Returns:
            List of [x1, y1, x2, y2, score, class] arrays in original
            image coordinates; empty list when nothing passes the filters.
        """
        if buffer.ndim != 2 or buffer.shape[1] < 6:
            return []

        x = buffer.copy()

        # YOLOX decoding: grid offsets -> absolute coordinates.
        if self._decode_needed and x.shape[0] == 8400:  # standard YOLOX 640x640 output
            grid, stride = self._ensure_grids()
            # (x + grid_x) * stride, (y + grid_y) * stride
            x[:, 0:2] = (x[:, 0:2] + grid) * stride
            # exp(w) * stride, exp(h) * stride
            x[:, 2:4] = np.exp(x[:, 2:4]) * stride

        # scores = objectness * class confidence (matches the reference impl).
        obj_scores = x[:, 4:5]
        cls_scores = x[:, 5:]
        scores_mat = obj_scores * cls_scores

        # Class-agnostic: take each row's best class and its score.
        cls_max_indices = scores_mat.argmax(axis=1)
        final_scores = scores_mat[np.arange(scores_mat.shape[0]), cls_max_indices]

        keep_mask = final_scores > self.conf_thres
        if not np.any(keep_mask):
            return []

        boxes_decoded = x[keep_mask, :4]
        scores_kept = final_scores[keep_mask]
        classes_kept = cls_max_indices[keep_mask]
        print(f"[DEBUG] After filtering conf>{self.conf_thres}: {len(scores_kept)}/{len(x)} detections, score range: [{scores_kept.min():.4f}, {scores_kept.max():.4f}]")

        # cx,cy,w,h -> x1,y1,x2,y2
        boxes_xyxy = self.xywh2xyxy(boxes_decoded)
        print(f"[DEBUG] Decoded coords range: x[{boxes_xyxy[:,0].min():.1f},{boxes_xyxy[:,0].max():.1f}] y[{boxes_xyxy[:,1].min():.1f},{boxes_xyxy[:,1].max():.1f}]")

        # Assemble detections as [x1, y1, x2, y2, score, class].
        if len(boxes_xyxy) == 0:
            return []

        dets = np.column_stack([
            boxes_xyxy,
            scores_kept.reshape(-1, 1),
            classes_kept.reshape(-1, 1)
        ])

        # NMS (trivial when only one candidate survived).
        if dets.shape[0] == 1:
            keep_indices = [0]
        else:
            keep_indices = self.nms(dets[:, :4], dets[:, 4])
            if keep_indices is None:
                keep_indices = []

        print(f"[DEBUG] After NMS: {len(keep_indices)}/{len(dets)} boxes kept")

        final_dets = [dets[idx] for idx in keep_indices]

        if len(final_dets) == 0:
            return []

        # Restore to original image coordinates. Letterboxing is top-left
        # aligned, so dividing by the scale is sufficient (no padding offset).
        if scale is None:
            scale = self.im_scale if self.im_scale is not None else min(self.normW / img_width, self.normH / img_height)
        for i, det in enumerate(final_dets):
            if det is None or len(det) < 6:
                continue
            det[0] = det[0] / scale
            det[1] = det[1] / scale
            det[2] = det[2] / scale
            det[3] = det[3] / scale
            # Clamp to the image bounds.
            det[0] = max(0, min(det[0], img_width))
            det[1] = max(0, min(det[1], img_height))
            det[2] = max(0, min(det[2], img_width))
            det[3] = max(0, min(det[3], img_height))

        print(f"[DEBUG] Final detections: {len(final_dets)}")
        return final_dets

    def detect(self, frame):
        """Convenience wrapper: run detection on a single frame."""
        images = [frame]
        results = self.detect_images(images)
        return results[0]

    def draw_boxes(self, frame, detections):
        """Draw detections onto frame; returns (frame, per-class counts)."""
        stats = {cls: 0 for cls in self.class_names.values()}
        for det in detections:
            if len(det) < 6:
                continue

            x1, y1, x2, y2, conf, cls = map(float, det[:6])
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)

            if cls not in self.display_classes:
                continue

            class_name = self.class_names.get(int(cls), f"class_{int(cls)}")
            label = f'{class_name} {conf:.2f}'

            color = self.colors[int(cls)]
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
            cv2.putText(frame, label, (x1, y1 - 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

            # .get avoids a KeyError for the fallback "class_N" label, which
            # is not pre-seeded in stats.
            stats[class_name] = stats.get(class_name, 0) + 1

        return frame, stats

    def update_params(self, conf_thres=None, iou_thres=None, display_classes=None):
        """Update thresholds / displayed classes; None leaves a value unchanged."""
        if conf_thres is not None:
            self.conf_thres = conf_thres
        if iou_thres is not None:
            self.iou_thres = iou_thres
        if display_classes is not None:
            self.display_classes = display_classes


class WebStreamer:
    """Flask front-end: streams annotated MJPEG and exposes control routes.

    Routes: / (page), /video_feed (MJPEG), /stats (JSON counters),
    /update_params (POST thresholds/classes), /change_model (POST model swap).
    """

    def __init__(self, detector, stream, model_dir='.', port=5000):
        self.detector = detector
        self.stream = stream
        self.port = port
        self.app = Flask(__name__)
        self.model_dir = model_dir
        self.fps = 0                     # rolling FPS, refreshed ~1x/second
        self.last_fps_update = time.time()
        self.frame_count = 0
        self.current_stats = {}          # per-class counts of the last frame

        template_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
        if not os.path.exists(template_folder):
            print(f"警告: 模板文件夹 '{template_folder}' 不存在")
            print("请确保在应用目录下创建templates文件夹，并将index.html放入其中")

        self.available_models = self._get_available_models()

        self.app.route('/')(self.index)
        self.app.route('/video_feed')(self.video_feed)
        self.app.route('/stats')(self.get_stats)
        self.app.route('/update_params', methods=['POST'])(self.update_params)
        self.app.route('/change_model', methods=['POST'])(self.change_model)

    def _get_available_models(self):
        """Scan model_dir for yolox*.onnx files."""
        models = []
        if os.path.exists(self.model_dir):
            for file in os.listdir(self.model_dir):
                if file.endswith('.onnx') and file.startswith('yolox'):
                    models.append(file)
        print(f"找到 {len(models)} 个可用模型: {models}")
        return models

    def index(self):
        """Render the control page with current model and thresholds."""
        current_model_name = os.path.basename(self.detector.name)
        print(f"可用模型: {self.available_models}")
        print(f"当前模型: {current_model_name}")
        print(f"类别数量: {len(self.detector.class_names)}")

        return render_template('index.html',
                               models=self.available_models,
                               current_model=current_model_name,
                               conf_thres=self.detector.conf_thres,
                               iou_thres=self.detector.iou_thres,
                               class_names=self.detector.class_names)

    def video_feed(self):
        """MJPEG endpoint backed by the frame generator."""
        return Response(self._generate_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')

    def _generate_frames(self):
        """Yield multipart JPEG chunks; also maintains FPS and stats."""
        while True:
            frame = self.stream.read()
            if frame is None:
                time.sleep(0.1)
                continue

            detections = self.detector.detect(frame)
            frame_with_boxes, self.current_stats = self.detector.draw_boxes(frame, detections)

            self.frame_count += 1
            current_time = time.time()
            elapsed = current_time - self.last_fps_update
            if elapsed >= 1.0:
                self.fps = self.frame_count / elapsed
                self.frame_count = 0
                self.last_fps_update = current_time

            ret, buffer = cv2.imencode('.jpg', frame_with_boxes)
            if not ret:
                # Encoding failed; skip this frame rather than yield garbage.
                continue
            frame_bytes = buffer.tobytes()

            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')

    def get_stats(self):
        """JSON counters for the dashboard (FPS, totals, people, vehicles)."""
        return jsonify({
            'fps': self.fps,
            'objects': sum(self.current_stats.values()),
            'people': self.current_stats.get('person', 0),
            # COCOLabels uses the darknet spelling 'motorbike' (not
            # 'motorcycle'), so that key must be used here to count them.
            'vehicles': sum([self.current_stats.get(cls, 0) for cls in ['car', 'truck', 'bus', 'motorbike']])
        })

    def update_params(self):
        """POST handler: update detector thresholds and displayed classes."""
        data = request.json

        if 'conf_thres' in data:
            conf_thres = float(data['conf_thres'])
            self.detector.update_params(conf_thres=conf_thres)
            print(f"更新置信度阈值为: {conf_thres}")

        if 'iou_thres' in data:
            iou_thres = float(data['iou_thres'])
            self.detector.update_params(iou_thres=iou_thres)
            print(f"更新IoU阈值为: {iou_thres}")

        if 'display_classes' in data:
            display_classes = [int(cls) for cls in data['display_classes']]
            self.detector.update_params(display_classes=display_classes)
            print(f"更新显示类别为: {[self.detector.class_names[cls] for cls in display_classes]}")

        return jsonify({'status': 'success'})

    def change_model(self):
        """POST handler: rebuild the detector with another ONNX weights file."""
        model_name = request.json.get('model_name')
        if not model_name or model_name not in self.available_models:
            print(f"错误: 模型 {model_name} 不存在")
            return jsonify({'status': 'error', 'message': '模型不存在'})

        try:
            print(f"正在切换到模型: {model_name}")

            # Carry the user's current settings over to the new detector.
            current_conf = self.detector.conf_thres
            current_iou = self.detector.iou_thres
            current_classes = self.detector.display_classes

            model_path = os.path.join(self.model_dir, model_name)
            self.detector = YOLOXDetector(onnx_file=model_path,
                                          device=self.detector.device,
                                          conf_thres=current_conf,
                                          iou_thres=current_iou)
            self.detector.display_classes = current_classes

            print(f"成功切换到模型: {model_name}")
            return jsonify({'status': 'success', 'message': f'已切换到模型: {model_name}'})
        except Exception as e:
            print(f"切换模型失败: {str(e)}")
            return jsonify({'status': 'error', 'message': str(e)})

    def start(self):
        """Run the Flask app (blocking) on all interfaces."""
        print(f"Web服务启动: http://localhost:{self.port}")
        self.app.run(host='0.0.0.0', port=self.port, threaded=True)


if __name__ == "__main__":
    # Configuration (kept stylistically consistent with yolov5_web.py).
    # NOTE(review): despite its name, RTSP_URL points at a local .h264
    # file here — cv2.VideoCapture accepts both files and rtsp:// URLs.
    RTSP_URL = "/root/test_videos/test_car_person_1080P.h264"
    MODEL_DIR = "/root/models"
    MODEL_WEIGHTS = "yolox_l.onnx"
    PORT = 5000

    # Start background capture and give it a moment to produce a frame.
    stream = RTSPStream(RTSP_URL).start()
    time.sleep(2)

    ONNX_FILE = os.path.join(MODEL_DIR, MODEL_WEIGHTS)
    detector = YOLOXDetector(onnx_file=ONNX_FILE)

    # Blocks here serving the Flask app until interrupted.
    web_streamer = WebStreamer(detector, stream, model_dir=MODEL_DIR, port=PORT)
    web_streamer.start()