import numpy as np
import cv2
from ultralytics import YOLO
import warnings
import argparse
warnings.filterwarnings("ignore")

class PersonDetector:
    """
    Person detector: finds human bodies (COCO class 0) in an image
    using a YOLO model.
    """

    def __init__(self, model_path="license_models/yolo11s.pt"):
        # Load the YOLO model and fuse conv+bn layers for faster inference.
        self.model = YOLO(model_path)
        self.model.fuse()

    def _is_in_area(self, bbox, target_area):
        """
        Check whether a detection box overlaps the target area.

        :param bbox: detection box [x1, y1, x2, y2]
        :param target_area: target region [x1, y1, x2, y2]
        :return: True if the intersection covers more than 20% of the
                 detection box's own area.
        """
        # Intersection rectangle of the two boxes.
        ix1 = max(bbox[0], target_area[0])
        iy1 = max(bbox[1], target_area[1])
        ix2 = min(bbox[2], target_area[2])
        iy2 = min(bbox[3], target_area[3])

        inter_area = max(0, ix2 - ix1) * max(0, iy2 - iy1)
        bbox_area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])

        # NOTE: the original comment claimed a 50% threshold, but the code
        # has always used 20%; the 20% behavior is kept and documented here.
        return inter_area > 0.2 * bbox_area

    def detect_persons(self, img, conf_threshold=0.5, target_area=None):
        """
        Detect persons in an image.

        :param img: input image (numpy array)
        :param conf_threshold: minimum confidence to keep a detection
        :param target_area: region [x1, y1, x2, y2]; None disables the
                            in-target check
        :return: tuple (list of person dicts with 'bbox', 'confidence',
                 'in_target' keys; bool — whether any person is inside
                 target_area)
        """
        results = self.model(img, verbose=False)

        persons = []
        if len(results) > 0 and hasattr(results[0], 'boxes') and results[0].boxes is not None:
            for det in results[0].boxes.data.tolist():
                # Expect [x1, y1, x2, y2, conf, cls_id]; skip malformed rows.
                if len(det) < 6:
                    continue
                x1, y1, x2, y2, conf, cls_id = det[:6]
                # Class id 0 is "person" in the COCO label set.
                if conf < conf_threshold or int(cls_id) != 0:
                    continue
                bbox = [int(x1), int(y1), int(x2), int(y2)]
                persons.append({
                    'bbox': bbox,
                    'confidence': float(conf),
                    'in_target': target_area is not None and self._is_in_area(bbox, target_area),
                })

        # Any person flagged in_target means the target area is occupied.
        target_has_person = any(p['in_target'] for p in persons)
        return persons, target_has_person

def process_video(video_path=None, output_path='output.mp4', frame_skip=0, target_area=None):
    """
    Process a video file or camera stream, drawing person detections.

    :param video_path: video file path (or int camera id); None opens camera 0
    :param output_path: output video path; falsy disables writing
    :param frame_skip: frames to skip after each processed frame, e.g. 2 means
                       process 1 frame then skip 2
    :param target_area: region [x1, y1, x2, y2]; defaults to the center
                        quarter of the frame
    """
    # Open the capture source (camera 0 when no path is given).
    cap = cv2.VideoCapture(0 if video_path is None else video_path)
    if not cap.isOpened():
        print("错误: 无法打开视频源")
        return

    # Query stream properties.
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    # BUG FIX: cameras frequently report 0 fps; a VideoWriter created with
    # fps=0 produces a broken output file. Fall back to a sane default.
    if fps <= 0:
        fps = 30
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Default target area: the centered quarter of the frame.
    if target_area is None:
        target_w, target_h = width // 2, height // 2
        target_x = (width - target_w) // 2
        target_y = (height - target_h) // 2
        target_area = [target_x, target_y, target_x + target_w, target_y + target_h]

    # Create the writer only when an output path was requested.
    out = None
    if output_path:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    detector = PersonDetector()

    print("开始处理视频，按'q'键退出...")

    frame_count = 0
    while True:
        ret, frame = cap.read()
        if not ret:
            break

        # Frame skipping: skipped frames are still displayed/written,
        # just not run through the detector.
        frame_count += 1
        if frame_skip > 0 and frame_count % (frame_skip + 1) != 0:
            if out is not None:
                out.write(frame)
            cv2.imshow('Person Detection', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            continue

        # Run person detection on this frame.
        persons, has_person = detector.detect_persons(frame, target_area=target_area)

        # Draw the target area (red border).
        cv2.rectangle(frame,
                      (target_area[0], target_area[1]),
                      (target_area[2], target_area[3]),
                      (0, 0, 255), 2)

        # Draw each detection.
        for person in persons:
            x1, y1, x2, y2 = person['bbox']
            conf = person['confidence']

            # Green box normally; red when the person is inside the target area.
            color = (0, 255, 0)
            if person.get('in_target', False):
                color = (0, 0, 255)

            cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)

            # Confidence label on a filled background above the box.
            label = f'Person: {conf:.2f}'
            (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1)
            cv2.rectangle(frame, (x1, y1 - 20), (x1 + w, y1), color, -1)
            cv2.putText(frame, label, (x1, y1 - 5),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 1)

        # Occupancy status above the target area.
        status = "someone here!" if has_person else "nobody here!"
        status_color = (0, 0, 255) if has_person else (0, 255, 0)
        cv2.putText(frame, f"status: {status}",
                    (target_area[0], target_area[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, status_color, 2)

        cv2.imshow('Person Detection', frame)

        if out is not None:
            out.write(frame)

        # Quit on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release all resources.
    cap.release()
    if out is not None:
        out.release()
    cv2.destroyAllWindows()
    print("处理完成")

def detect_image(image_path, output_path='output.jpg'):
    """
    Detect persons in a single image and save an annotated copy.

    :param image_path: input image path
    :param output_path: output image path
    """
    # Read the image; cv2.imread returns None on failure.
    img = cv2.imread(image_path)
    if img is None:
        print("错误: 无法读取图片")
        return

    detector = PersonDetector()

    # BUG FIX: detect_persons returns the tuple (persons, target_has_person).
    # The original bound the whole tuple to `persons`, so the loop below
    # iterated over (list, bool) and crashed on person['bbox'].
    persons, _ = detector.detect_persons(img)

    # Draw each detection on the image.
    for person in persons:
        x1, y1, x2, y2 = person['bbox']
        conf = person['confidence']

        # Bounding box (green).
        cv2.rectangle(img, (x1, y1), (x2, y2), (0, 255, 0), 2)

        # Confidence label on a filled background above the box.
        label = f'Person: {conf:.2f}'
        (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1)
        cv2.rectangle(img, (x1, y1 - 20), (x1 + w, y1), (0, 255, 0), -1)
        cv2.putText(img, label, (x1, y1 - 5),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 1)

    # Save the annotated result.
    cv2.imwrite(output_path, img)
    print(f"结果已保存到 {output_path}")

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='人体检测程序')
    parser.add_argument('--source', type=str, default='0',
                        help='视频文件路径或摄像头ID（默认：0）')
    parser.add_argument('--output', type=str, default='output.mp4',
                        help='输出视频文件路径（默认：output.mp4）')
    parser.add_argument('--frame-skip', type=int, default=0,
                        help='跳帧数（默认：0，不跳帧）')
    parser.add_argument('--image', action='store_true',
                        help='处理单张图片而非视频')
    parser.add_argument('--area', type=str, default=None,
                        help='目标区域，格式为x1,y1,x2,y2（例如：100,100,500,400）')

    args = parser.parse_args()

    # Parse the target-area argument ("x1,y1,x2,y2").
    target_area = None
    if args.area:
        try:
            # Unpacking raises ValueError on a wrong element count,
            # int() raises ValueError on non-numeric parts.
            x1, y1, x2, y2 = map(int, args.area.split(','))
            target_area = [x1, y1, x2, y2]
        # BUG FIX: the original bare `except:` also swallowed SystemExit
        # and KeyboardInterrupt; only malformed input should be caught.
        except ValueError:
            print("错误：目标区域格式不正确，请使用x1,y1,x2,y2格式")
            exit(1)

    if args.image:
        # Single-image mode.
        detect_image(args.source, args.output)
    else:
        # Video/camera mode: numeric source strings are camera ids.
        source = int(args.source) if args.source.isdigit() else args.source
        process_video(video_path=source,
                      output_path=args.output,
                      frame_skip=args.frame_skip,
                      target_area=target_area)