from flask import Flask, Response, request, jsonify
import argparse
import torch
import cv2
import time
from flask_cors import CORS
from flask_socketio import SocketIO
from collections import Counter

# 导入YOLOv5模块
from yolov5.models.common import DetectMultiBackend
from yolov5.utils.dataloaders import LoadStreams
from yolov5.utils.general import (check_img_size, non_max_suppression, scale_boxes)
from yolov5.utils.plots import Annotator, colors
from yolov5.utils.torch_utils import select_device

import pathlib
import platform

# Workaround for loading checkpoints whose pickled state references
# pathlib.PosixPath while running on Windows (common with YOLOv5 weights
# trained on Linux).  Only patch on Windows: applying this on a POSIX host
# would break every pathlib path operation in the process.
temp = pathlib.PosixPath
if platform.system() == 'Windows':
    pathlib.PosixPath = pathlib.WindowsPath

app = Flask(__name__)
CORS(app, resources={r"/*": {"origins": "*"}}, supports_credentials=True)

# WebSocket server used to push per-frame detection counts and location fixes.
socketio = SocketIO(
    app,
    cors_allowed_origins="*",
    async_mode='threading'
)

# Global model state, populated by initialize_model().
model = None          # DetectMultiBackend instance
device = None         # torch device selected for inference
names = None          # class names reported by the checkpoint
class_mapping = None  # class index -> allowed class name
# Only these classes are drawn, counted, and reported to clients.
allowed_classes = ['fire', 'smoke', 'people', 'climb', 'fight']

WEIGHT_PATH = 'weights/best.pt'


def initialize_model(weights=WEIGHT_PATH, device_id='0'):
    """Load the YOLOv5 model once and cache it in module globals.

    Subsequent calls return the cached (imgsz, stride, pt) tuple instead of
    reloading the checkpoint.  Without this guard, generate_frames() — which
    calls this with default arguments on every video-feed request — would
    reload the model and silently discard the weights/device selected on the
    command line at startup.

    Args:
        weights: path to the .pt checkpoint file.
        device_id: CUDA device string ('0', '0,1,2,3', ...) or 'cpu'.

    Returns:
        (imgsz, stride, pt): checked inference image size, model stride, and
        the native-PyTorch flag needed by the stream loader.
    """
    global model, device, names, class_mapping

    # Reuse the already-loaded model and its cached metadata.
    cached = getattr(initialize_model, '_cache', None)
    if model is not None and cached is not None:
        return cached

    device = select_device(device_id)
    model = DetectMultiBackend(weights, device=device, dnn=False)
    stride, names, pt = model.stride, model.names, model.pt
    imgsz = check_img_size((640, 640), s=stride)  # round size to a stride multiple

    # Warm up with a dummy batch so the first real frame is not slow.
    model.warmup(imgsz=(1, 3, *imgsz))

    # NOTE(review): assumes the checkpoint's class index order matches
    # allowed_classes — confirm against the trained model's names.
    class_mapping = {i: name for i, name in enumerate(allowed_classes)}

    initialize_model._cache = (imgsz, stride, pt)
    return imgsz, stride, pt


def generate_frames(rtmp_url, conf_thres=0.25, iou_thres=0.45, max_det=1000):
    """Consume an RTMP stream, run detection, and yield MJPEG frame parts.

    Each processed frame is yielded as one multipart/x-mixed-replace chunk.
    As side effects, per-frame class counts ('detection_counts') and a
    location fix ('location_update') are pushed to WebSocket clients.

    Args:
        rtmp_url: RTMP stream address to consume.
        conf_thres: minimum confidence kept by NMS.
        iou_thres: IoU threshold used by NMS.
        max_det: maximum detections retained per image.

    Yields:
        bytes: one '--frame' JPEG part per processed image.
    """
    global model, device, names, class_mapping, allowed_classes

    imgsz, stride, pt = initialize_model()

    # Open the RTMP stream.
    dataset = LoadStreams(rtmp_url, img_size=imgsz, stride=stride, auto=pt)

    # Process each incoming frame batch.
    for path, im, im0s, vid_cap, s in dataset:
        # uint8 array -> model tensor on the inference device.
        im = torch.from_numpy(im).to(device)
        im = im.half() if model.fp16 else im.float()  # uint8 to fp16/32
        im /= 255  # 0 - 255 to 0.0 - 1.0
        if len(im.shape) == 3:
            im = im[None]  # add batch dimension

        # Inference.
        pred = model(im, augment=False, visualize=False)

        # Non-maximum suppression.
        pred = non_max_suppression(pred, conf_thres, iou_thres, None, False, max_det=max_det)

        # Handle each detection result (one per image in the batch).
        for i, det in enumerate(pred):
            im0 = im0s[i].copy()

            annotator = Annotator(im0, line_width=3, example=str(names))
            # NOTE(review): `detections` is collected but never emitted —
            # presumably intended for a future payload; confirm or remove.
            detections = []
            # Per-class object counts for this frame.
            object_counter = Counter()

            if len(det):
                # Rescale boxes from model input size to the original frame.
                det[:, :4] = scale_boxes(im.shape[2:], det[:, :4], im0.shape).round()

                # Draw results — only the allowed classes.
                for *xyxy, conf, cls in reversed(det):
                    c = int(cls)  # integer class index
                    class_name = class_mapping.get(c, "unknown")

                    # Only process allowed classes.
                    if class_name in allowed_classes:
                        label = f"{class_name} {conf:.2f}"
                        annotator.box_label(xyxy, label, color=colors(c, True))

                        # Record this detection.
                        x1, y1, x2, y2 = [int(coord.item()) for coord in xyxy]
                        detections.append({
                            'class': class_name,
                            'confidence': float(conf),
                            'bbox': [x1, y1, x2, y2]
                        })

                        # Update the per-class counter.
                        object_counter[class_name] += 1

            # Push per-class counts over WebSocket.
            count_data = {
                'timestamp': time.time(),
                'counts': {cls: count for cls, count in object_counter.items()},
                'total': sum(object_counter.values())
            }
            socketio.emit('detection_counts', count_data)

            # Push location over WebSocket.  BUGFIX: latitude/longitude were
            # swapped (latitude cannot exceed 90; Changchun is lat ~43.8,
            # lon ~125.3).
            location_data = {
                'latitude': 43.821519,
                'longitude': 125.282574,
                'timestamp': time.time(),
                'name': '吉林省长春市朝阳区吉林大学（前卫南区）王湘浩楼'
            }
            # TODO: fetch real-time coordinates from the drone API.
            socketio.emit('location_update', location_data)

            # Get the annotated image.
            result_frame = annotator.result()

            # Encode the BGR frame as JPEG.  BUGFIX: check the success flag —
            # on failure, skip the frame instead of streaming garbage bytes.
            ret, buffer = cv2.imencode('.jpg', result_frame)
            if not ret:
                continue
            frame = buffer.tobytes()

            # Yield the frame as a multipart chunk.
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')


@app.route('/api/video_feed')
def video_feed():
    """Stream annotated detection frames as an MJPEG response."""
    # The RTMP source comes from the query string, with a placeholder default.
    stream_source = request.args.get('rtmp_url', 'rtmp://example.com/live/stream')
    frame_parts = generate_frames(stream_source)
    return Response(
        frame_parts,
        mimetype='multipart/x-mixed-replace; boundary=frame'
    )


@app.route('/api/start_detection', methods=['POST'])
def start_detection():
    """Validate a detection request and return the video-feed URL.

    Expects a JSON body containing an 'rtmp_url' field.

    Returns:
        200 with the stream URL on success; 400 JSON error when the body is
        missing, not valid JSON, or lacks 'rtmp_url'.
    """
    from urllib.parse import quote

    # BUGFIX: request.json raises on a missing/non-JSON body instead of
    # reaching the 400 branch below; silent=True yields None in that case.
    data = request.get_json(silent=True) or {}
    rtmp_url = data.get('rtmp_url')

    if not rtmp_url:
        return jsonify({'error': 'RTMP URL is required'}), 400

    # BUGFIX: URL-encode the stream address so characters such as '&', '?'
    # or '#' in the RTMP URL cannot corrupt the returned query string.
    return jsonify({
        'status': 'success',
        'message': 'Detection started',
        'stream_url': f'/api/video_feed?rtmp_url={quote(rtmp_url, safe="")}'
    })


@app.route('/api/status', methods=['GET'])
def status():
    """Report that the service is running, plus the classes it detects."""
    payload = {
        'status': 'running',
        'allowed_classes': allowed_classes
    }
    return jsonify(payload)


# WebSocket connection events
@socketio.on('connect')
def handle_connect():
    """Log that a WebSocket client has connected."""
    print('Client connected')


@socketio.on('disconnect')
def handle_disconnect():
    """Log that a WebSocket client has disconnected."""
    print('Client disconnected')


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', type=str, default=WEIGHT_PATH, help='模型权重路径')
    parser.add_argument('--device', default='0', help='cuda设备，例如0或0,1,2,3或cpu')
    # Generalized: host/port are now configurable, defaults preserve the
    # original behavior (0.0.0.0:5000).
    parser.add_argument('--host', default='0.0.0.0', help='interface to bind the HTTP server to')
    parser.add_argument('--port', type=int, default=5000, help='HTTP server port')
    opt = parser.parse_args()

    # Load the model up front so the first request does not pay the cost.
    initialize_model(weights=opt.weights, device_id=opt.device)

    # Run the Flask app through SocketIO so WebSocket events work.
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader — do
    # not expose this on a public interface in production.
    try:
        socketio.run(app, host=opt.host, port=opt.port, debug=True)
    except Exception as e:
        print(f"Error starting the server: {e}")