import threading
import time

import cv2
from django.http import HttpResponse, StreamingHttpResponse
from django.shortcuts import render
from ultralytics import YOLO

# Initialize the ultralytics YOLOv8 model (nano weights), shared by all requests.
model = YOLO('yolov8n.pt')

# 摄像头类，负责捕获视频帧并进行YOLOv8推理
# Camera class: captures video frames on a background thread and runs
# YOLOv8 inference on demand in get_frame().
class VideoCamera(object):
    """Webcam capture with YOLOv8 person detection.

    A daemon thread continuously pulls frames from the device; get_frame()
    annotates the most recent frame with detection results and returns it
    JPEG-encoded, or None when no frame is available.
    """

    def __init__(self):
        # Open the default camera; the DShow backend avoids very slow
        # device-open times on Windows.
        self.video = cv2.VideoCapture(0, cv2.CAP_DSHOW)
        if not self.video.isOpened():
            raise Exception("无法打开摄像头")

        # Optional capture parameters (the driver may silently ignore them).
        self.video.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.video.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
        self.video.set(cv2.CAP_PROP_FPS, 30)

        # Most recent person count drawn onto a frame (updated by get_frame).
        self.person_count = 0

        # Prime with one frame so get_frame() has data immediately.
        (self.grabbed, self.frame) = self.video.read()

        # Protects self.grabbed / self.frame, which are shared with the
        # background reader thread started below.
        self.lock = threading.Lock()
        self._running = True
        threading.Thread(target=self.update, args=(), daemon=True).start()

    def __del__(self):
        # Signal the reader thread to stop, then release the device.
        # getattr guards: __init__ may have raised before these attributes
        # were ever assigned.
        self._running = False
        video = getattr(self, "video", None)
        if video is not None and video.isOpened():
            video.release()

    def get_frame(self):
        """Return the latest frame as annotated JPEG bytes, or None."""
        # Copy the frame under the lock, then do the expensive inference,
        # plotting and JPEG encoding OUTSIDE the lock so the capture thread
        # is never blocked for the duration of a model call.
        with self.lock:
            if not self.grabbed or self.frame is None:
                return None
            frame = self.frame.copy()

        # Run YOLOv8 inference on the copied frame.
        results = model(frame)

        # Count detections whose class id is 0 ("person" in COCO).
        person_count = 0
        if results[0].boxes is not None:
            classes = results[0].boxes.cls
            person_count = (classes == 0).sum().item()
        self.person_count = person_count

        # Draw boxes/labels/confidences, then overlay the person count.
        annotated_frame = results[0].plot()
        cv2.putText(annotated_frame, f"persons: {person_count}", (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        # Encode the annotated frame as JPEG for the MJPEG stream.
        _, jpeg = cv2.imencode('.jpg', annotated_frame)
        return jpeg.tobytes()

    def update(self):
        """Continuously read frames from the camera (runs on a daemon thread)."""
        while self._running:
            (grabbed, frame) = self.video.read()
            with self.lock:
                self.grabbed = grabbed
                self.frame = frame


# 生成器函数，持续产生视频帧
# Generator that produces an endless MJPEG frame stream.
def gen(camera):
    """Yield multipart/x-mixed-replace chunks of JPEG frames forever.

    Args:
        camera: any object exposing get_frame() -> bytes | None.
    """
    while True:
        frame = camera.get_frame()
        if frame is None:
            # Bug fix: the original `pass` busy-spun at 100% CPU whenever no
            # frame was available; back off briefly before retrying.
            time.sleep(0.05)
            continue
        # Each chunk is one part of the multipart/x-mixed-replace stream.
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')

# Create your views here.
def video_stream(request):
    """Stream annotated webcam frames to the client as MJPEG.

    Returns a StreamingHttpResponse on success, or a bare 500 response if
    the camera cannot be opened.
    """
    try:
        cam = VideoCamera()
        # multipart/x-mixed-replace makes the browser replace each frame
        # in place, producing a live video effect.
        return StreamingHttpResponse(
            gen(cam),
            content_type="multipart/x-mixed-replace;boundary=frame",
        )
    except Exception as e:
        print(f"视频流错误：{e}")
        # Bug fix: HttpResponse was referenced here but never imported, so
        # the error path itself raised NameError and masked the real
        # failure; it is now imported at the top of the module.
        return HttpResponse(status=500)

def dashboard_view(request):
    """Render the dashboard page."""
    template_name = 'dashboard.html'
    return render(request, template_name)