import cv2
import threading
import time
import torch
import numpy as np
from flask import Flask, Response, render_template, request, jsonify
import os
from enum import Enum
import RivsInference

# Names of the 80 COCO classes, in index order; the enum value is the
# class index the detector emits (person == 0 ... toothbrush == 79).
_COCO_CLASS_NAMES = [
    'person', 'bicycle', 'car', 'motorbike', 'aeroplane', 'bus', 'train',
    'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign',
    'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
    'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
    'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard',
    'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork',
    'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
    'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair',
    'sofa', 'pottedplant', 'bed', 'diningtable', 'toilet', 'tvmonitor',
    'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
    'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
    'scissors', 'teddy_bear', 'hair_drier', 'toothbrush',
]

# Built with the Enum functional API; start=0 matches the model's 0-based ids.
COCOLabels = Enum('COCOLabels', _COCO_CLASS_NAMES, start=0)
COCOLabels.__doc__ = "COCO detection class labels; value is the 0-based class id."


class RTSPStream:
    """Background reader for an RTSP/video source.

    A daemon thread keeps pulling frames and retains only the most recent
    one; read() hands out a copy so callers can draw on it freely.
    """

    def __init__(self, rtsp_url):
        self.rtsp_url = rtsp_url
        self.frame = None
        self.lock = threading.Lock()
        self.stopped = False
        self.thread = None

    def start(self):
        """Launch the capture thread; returns self so calls can be chained."""
        worker = threading.Thread(target=self._capture_frames, daemon=True)
        self.thread = worker
        worker.start()
        return self

    def _capture_frames(self):
        """Capture loop: read frames until stop() flips the flag."""
        cap = cv2.VideoCapture(self.rtsp_url)

        # Bail out early when the source cannot be opened at all.
        if not cap.isOpened():
            print(f"无法打开RTSP流: {self.rtsp_url}")
            self.stopped = True
            return

        while not self.stopped:
            grabbed, image = cap.read()
            if grabbed:
                # Publish the newest frame under the lock.
                with self.lock:
                    self.frame = image
                time.sleep(0.01)  # keep CPU usage down
            else:
                # Read failed: tear down and reopen the source.
                print("无法获取帧，尝试重新连接...")
                cap.release()
                time.sleep(1)
                cap = cv2.VideoCapture(self.rtsp_url)

        cap.release()

    def read(self):
        """Return a copy of the latest frame, or None if nothing arrived yet."""
        with self.lock:
            latest = self.frame
            return None if latest is None else latest.copy()

    def stop(self):
        """Signal the capture loop to finish and wait briefly for the thread."""
        self.stopped = True
        if self.thread:
            self.thread.join(timeout=2.0)

class YOLOv5Detector:
    """YOLOv5 ONNX detector executed on a RivsInference accelerator engine."""

    def __init__(self, onnx_file='yolov5s.onnx', device=None, conf_thres=0.25, iou_thres=0.45):
        """Build (or load a cached) inference engine for the given ONNX model.

        Args:
            onnx_file: path to the YOLOv5 ONNX weights.
            device: informational device tag only; the card/cluster selection
                below is what actually binds the hardware.
            conf_thres: objectness score threshold for candidate boxes.
            iou_thres: IoU threshold used by NMS.
        """
        card_id = 0
        cluster_ids = [0]

        # Compiled engines are cached next to the ONNX file as "<name>.exec".
        engine_path = os.path.join(os.path.dirname(onnx_file), os.path.basename(onnx_file) + ".exec")
        print("input onnx_file: ", onnx_file, ",engine_path: ", engine_path)
        self.handler = RivsInference.set_device(card_id, cluster_ids)

        # Describe the ONNX graph I/O for the parser.
        parser = RivsInference.create_parser(RivsInference.ONNX_MODEL)
        parser.set_input_names(['images'])
        parser.set_output_names(['output'])
        parser.set_input_shapes(['1,3,640,640'])
        parser.set_input_dtypes("DT_FLOAT32")

        if not os.path.exists(engine_path):
            # First run: parse + optimize the network, then cache the executable.
            network = parser.read(onnx_file)
            optimizer = RivsInference.create_optimizer()
            # Alternative build flags: RivsInference.KFP16, RivsInference.KDEFAULT.
            optimizer.set_build_flag(RivsInference.KFP16_MIX)
            self.engine = optimizer.build(network)
            self.engine.save_executable(engine_path)
        else:
            self.engine = RivsInference.load(engine_path)

        assert self.engine is not None

        self.name = "yolov5s"
        self.device = device
        print(f"模型运行在设备: {device}")

        # Letterboxed network input resolution.
        self.normH = 640
        self.normW = 640
        # BUGFIX: was initialized as `img_scale` while preprocess() writes
        # `im_scale`; use one consistent attribute (set on each preprocess call).
        self.im_scale = None

        # Thresholds (score threshold / NMS threshold).
        self.conf_thres = conf_thres
        self.iou_thres = iou_thres

        # Map class id -> class name from the COCO enum.
        self.class_names = {item.value: item.name for item in COCOLabels}
        print(f"已加载 {len(self.class_names)} 个类别")

        # One random BGR color per class, for drawing.
        self.colors = [[np.random.randint(0, 255) for _ in range(3)] for _ in self.class_names]
        self.display_classes = list(range(len(self.class_names)))  # show all classes by default

    def destroy(self):
        """Release the accelerator device handle."""
        RivsInference.release_device(self.handler)

    def detect_images(self, images):
        """Run detection on a batch of BGR images; returns one result list per image."""
        inputs = []
        outputs = []

        for image in images:
            processed_img = self.preprocess(image)
            processed_img = np.around(processed_img, decimals=5)
            inputs.append(processed_img)

        # Use sync mode, py_stream=None by default.
        py_future = self.engine.run_with_batch(
            sample_nums=len(images),
            input_list=np.array([inputs]),
            buffer_type=RivsInference.TIF_ENGINE_RSC_IN_HOST_OUT_HOST,
        )

        pred = py_future.get()
        pred = np.asarray(pred)
        outputs.append(pred)
        outputs = outputs[0][0]  # shape: 1,1,25200,85

        # Post-process each image back to its own coordinate space.
        results = []
        for index, image in enumerate(images):
            image_shape = image.shape
            result = self.postprocess(outputs[index], image_shape[1], image_shape[0])
            results.append(result)

        return results

    def preprocess(self, img):
        """Letterbox-resize a BGR image to (normH, normW), return a 1x3xHxW float32 tensor in [0,1]."""
        assert isinstance(
            img, np.ndarray
        ), "input image data type should be numpy.ndarray"
        assert img.shape[0] > 0 and img.shape[1] > 0, "input image shape error"

        shape = img.shape[0:2]
        # Uniform scale that fits the image inside the network input.
        self.im_scale = min(self.normH / img.shape[0], self.normW / img.shape[1])
        new_imsize = int(round(shape[1] * self.im_scale)), int(
            round(shape[0] * self.im_scale)
        )  # w, h
        dw, dh = self.normW - new_imsize[0], self.normH - new_imsize[1]  # wh padding
        dw /= 2  # pad split evenly on both sides
        dh /= 2
        if shape[::-1] != new_imsize:
            img = cv2.resize(img, new_imsize, interpolation=cv2.INTER_LINEAR).astype(
                np.float32
            )
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        img = cv2.copyMakeBorder(
            img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=(114, 114, 114)
        )  # gray letterbox border

        img = img.transpose((2, 0, 1))[::-1]  # HWC to CHW, BGR to RGB
        img = np.ascontiguousarray(img).astype(np.float32)
        img /= 255.0  # normalize to [0, 1]
        if len(img.shape) == 3:
            img = img[None]  # add batch dimension
        return img

    def xywh2xyxy(self, x):
        """Convert (cx, cy, w, h) rows to (x1, y1, x2, y2) rows."""
        y = np.copy(x)
        y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
        y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
        y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
        y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
        return y

    def nms(self, boxes, scores):
        """Greedy non-maximum suppression; returns indices of kept boxes (or None on bad input)."""
        if scores.shape[0] < 1 or boxes.shape[0] != scores.shape[0]:
            return None
        order = scores.argsort()[::-1]  # highest score first
        x1 = boxes[:, 0]
        y1 = boxes[:, 1]
        x2 = boxes[:, 2]
        y2 = boxes[:, 3]
        area = (y2 - y1 + 1) * (x2 - x1 + 1)
        boxes_left = []
        while len(order) > 0:
            i = order[0]
            boxes_left.append(i)
            # Intersection of the top box with all remaining boxes.
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])
            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            iou = inter / (area[i] + area[order[1:]] - inter)
            # Keep only boxes that do not overlap the chosen one too much.
            ids = np.where(iou <= self.iou_thres)[0]
            order = order[ids + 1]
        return boxes_left

    def postprocess(self, buffer, img_width, img_height):
        """Turn raw (N, 85) predictions into [x1, y1, x2, y2, conf, cls] rows in image coords.

        Returns a list of 1-D numpy arrays, one per kept detection.
        """
        cands = buffer[..., 4] > self.conf_thres  # objectness gate
        output = []
        # Undo the letterbox applied in preprocess(): scale and symmetric padding.
        scale = min(self.normW / img_width, self.normH / img_height)
        pad = (self.normW - img_width * scale) / 2, (
            self.normH - img_height * scale
        ) / 2
        x = buffer[cands]
        if not x.shape[0]:
            return []
        x[:, 5:] *= x[:, 4:5]  # class score = class prob * objectness
        boxes = self.xywh2xyxy(x[:, :4])
        conf = x[:, 4]
        cls = np.argmax(x[:, 5:], axis=1)
        box_num = boxes.shape[0]
        x = np.concatenate(
            (boxes, np.reshape(conf, (box_num, -1)), np.reshape(cls, (box_num, -1))),
            axis=1,
        )
        if x.shape[0] == 1:
            # BUGFIX: previously appended the 2-D (1, 6) array itself, which
            # broke the per-detection rescaling below (row indexing) and was
            # silently skipped by draw_boxes() via its len(det) < 6 guard.
            output.append(x[0])
        else:
            selected_boxes = self.nms(x[:, :4], x[:, 4])
            for box_id in selected_boxes:
                output.append(x[box_id])
        for det in output:
            if min(det.shape) != 0:
                det[[0, 2]] -= pad[0]
                det[[1, 3]] -= pad[1]
                det[:4] /= scale
                # BUGFIX: clip() returns a copy, so the old calls were no-ops,
                # and the width/height bounds were swapped. Clip x coordinates
                # to the image width and y coordinates to the image height.
                det[[0, 2]] = det[[0, 2]].clip(0, img_width)
                det[[1, 3]] = det[[1, 3]].clip(0, img_height)
        return output

    def detect(self, frame):
        """Run detection on a single frame; returns its detection list."""
        images = [frame]
        results = self.detect_images(images)
        return results[0]

    def draw_boxes(self, frame, detections):
        """Draw detection boxes/labels on the frame; returns (frame, per-class counts)."""
        stats = {cls: 0 for cls in self.class_names.values()}
        for det in detections:  # each det is [x1, y1, x2, y2, conf, cls]
            if len(det) < 6:  # skip malformed detections
                continue

            # Unpack coordinates, confidence and class id.
            x1, y1, x2, y2, conf, cls = map(float, det[:6])
            x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)

            if cls not in self.display_classes:
                continue

            # Resolve the human-readable class name.
            class_name = self.class_names.get(int(cls), f"class_{int(cls)}")
            label = f'{class_name} {conf:.2f}'

            # Draw the bounding box and its label.
            color = self.colors[int(cls)]
            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
            cv2.putText(frame, label, (x1, y1-10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

            # Update per-class counters.
            stats[class_name] += 1

        return frame, stats

    def update_params(self, conf_thres=None, iou_thres=None, display_classes=None):
        """Update detection thresholds and/or the set of displayed class ids."""
        if conf_thres is not None:
            self.conf_thres = conf_thres

        if iou_thres is not None:
            self.iou_thres = iou_thres

        if display_classes is not None:
            self.display_classes = display_classes


class WebStreamer:
    """Flask front-end that serves the annotated MJPEG stream and control endpoints."""

    def __init__(self, detector, stream, model_dir='.', port=5000):
        """Wire up the Flask app, routes, and FPS/statistics state.

        Args:
            detector: a YOLOv5Detector instance (may be swapped via /change_model).
            stream: an RTSPStream providing frames via read().
            model_dir: directory scanned for switchable .onnx models.
            port: HTTP port to listen on.
        """
        self.detector = detector
        self.stream = stream
        self.port = port
        self.app = Flask(__name__)
        self.model_dir = model_dir
        self.fps = 0
        self.last_fps_update = time.time()
        self.frame_count = 0
        self.current_stats = {}

        # Warn early if the Flask template folder is missing.
        template_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'templates')
        if not os.path.exists(template_folder):
            print(f"警告: 模板文件夹 '{template_folder}' 不存在")
            print("请确保在应用目录下创建templates文件夹，并将index.html放入其中")

        # Discover the switchable models once at startup.
        self.available_models = self._get_available_models()

        # Register routes.
        self.app.route('/')(self.index)
        self.app.route('/video_feed')(self.video_feed)
        self.app.route('/stats')(self.get_stats)
        self.app.route('/update_params', methods=['POST'])(self.update_params)
        self.app.route('/change_model', methods=['POST'])(self.change_model)

    def _get_available_models(self):
        """Return the yolov5*.onnx filenames found in the model directory."""
        models = []
        if os.path.exists(self.model_dir):
            for file in os.listdir(self.model_dir):
                if file.endswith('.onnx') and file.startswith('yolov5'):
                    models.append(file)
        print(f"找到 {len(models)} 个可用模型: {models}")
        return models

    def index(self):
        """Render the main HTML page with the current model and parameters."""
        current_model_name = os.path.basename(self.detector.name)

        # Debug output for startup diagnostics.
        print(f"可用模型: {self.available_models}")
        print(f"当前模型: {current_model_name}")
        print(f"类别数量: {len(self.detector.class_names)}")

        return render_template('index.html',
                              models=self.available_models,
                              current_model=current_model_name,
                              conf_thres=self.detector.conf_thres,
                              iou_thres=self.detector.iou_thres,
                              class_names=self.detector.class_names)

    def video_feed(self):
        """Video stream route returning multipart MJPEG."""
        return Response(self._generate_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')

    def _generate_frames(self):
        """Yield an endless MJPEG stream of frames annotated with detections."""
        while True:
            # Grab the most recent frame; wait briefly if none is available yet.
            frame = self.stream.read()
            if frame is None:
                time.sleep(0.1)
                continue

            # Run detection and draw the results.
            detections = self.detector.detect(frame)
            frame_with_boxes, self.current_stats = self.detector.draw_boxes(frame, detections)

            # Refresh the FPS estimate once per second.
            self.frame_count += 1
            current_time = time.time()
            elapsed = current_time - self.last_fps_update
            if elapsed >= 1.0:
                self.fps = self.frame_count / elapsed
                self.frame_count = 0
                self.last_fps_update = current_time

            # Encode as JPEG. BUGFIX: the success flag was previously ignored;
            # skip the frame instead of yielding garbage if encoding fails.
            ret, buffer = cv2.imencode('.jpg', frame_with_boxes)
            if not ret:
                continue
            frame_bytes = buffer.tobytes()

            # Emit one MJPEG part.
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')

    def get_stats(self):
        """Return FPS and object counts for the stats endpoint as JSON."""
        return jsonify({
            'fps': self.fps,
            'objects': sum(self.current_stats.values()),
            'people': self.current_stats.get('person', 0),
            # BUGFIX: this file's COCO label is 'motorbike', not 'motorcycle',
            # so motorbikes were never counted in the vehicle total.
            'vehicles': sum([self.current_stats.get(cls, 0) for cls in ['car', 'truck', 'bus', 'motorbike']])
        })

    def update_params(self):
        """Update detector thresholds and displayed classes from a JSON body."""
        data = request.json

        # Confidence threshold.
        if 'conf_thres' in data:
            conf_thres = float(data['conf_thres'])
            self.detector.update_params(conf_thres=conf_thres)
            print(f"更新置信度阈值为: {conf_thres}")

        # IoU threshold.
        if 'iou_thres' in data:
            iou_thres = float(data['iou_thres'])
            self.detector.update_params(iou_thres=iou_thres)
            print(f"更新IoU阈值为: {iou_thres}")

        # Displayed class ids.
        if 'display_classes' in data:
            display_classes = [int(cls) for cls in data['display_classes']]
            self.detector.update_params(display_classes=display_classes)
            print(f"更新显示类别为: {[self.detector.class_names[cls] for cls in display_classes]}")

        return jsonify({'status': 'success'})

    def change_model(self):
        """Swap the detector for another available model, preserving parameters."""
        model_name = request.json.get('model_name')
        if not model_name or model_name not in self.available_models:
            print(f"错误: 模型 {model_name} 不存在")
            return jsonify({'status': 'error', 'message': '模型不存在'})

        try:
            print(f"正在切换到模型: {model_name}")

            # Carry the current parameters over to the new detector.
            current_conf = self.detector.conf_thres
            current_iou = self.detector.iou_thres
            current_classes = self.detector.display_classes

            # Load the new model.
            model_path = os.path.join(self.model_dir, model_name)
            self.detector = YOLOv5Detector(onnx_file=model_path,
                                          device=self.detector.device,
                                          conf_thres=current_conf,
                                          iou_thres=current_iou)
            self.detector.display_classes = current_classes

            print(f"成功切换到模型: {model_name}")
            return jsonify({'status': 'success', 'message': f'已切换到模型: {model_name}'})
        except Exception as e:
            print(f"切换模型失败: {str(e)}")
            return jsonify({'status': 'error', 'message': str(e)})

    def start(self):
        """Run the Flask server (blocking)."""
        print(f"Web服务启动: http://localhost:{self.port}")
        self.app.run(host='0.0.0.0', port=self.port, threaded=True)


if __name__ == "__main__":
    # --- Configuration ---
    # NOTE(review): this "RTSP" URL is currently a local .mp4 file; replace it
    # with a real rtsp:// address for deployment.
    RTSP_URL = "/root/test_data/video/test_car_person_1080P.mp4"
    MODEL_DIR = "/root/models"      # directory holding the .onnx models
    MODEL_WEIGHTS = "yolov5s.onnx"  # default model
    PORT = 5000                     # web service port

    # Start pulling frames and give the source a moment to stabilize.
    stream = RTSPStream(RTSP_URL).start()
    time.sleep(2)

    # Build the YOLOv5 detector from the default weights.
    ONNX_FILE = os.path.join(MODEL_DIR, MODEL_WEIGHTS)
    detector = YOLOv5Detector(onnx_file=ONNX_FILE)

    # Serve the annotated stream. BUGFIX: the capture thread and the
    # accelerator device handle were never released; clean up on shutdown
    # (including Ctrl-C propagating out of app.run).
    web_streamer = WebStreamer(detector, stream, model_dir=MODEL_DIR, port=PORT)
    try:
        web_streamer.start()
    finally:
        stream.stop()
        detector.destroy()

