import cv2
import numpy as np
from ultralytics import YOLO

import torch
import torchvision
from torchvision import transforms

# Class names this script cares about (YOLO/COCO class index 0 is "person").
CLASSES = ['person']

# Drawing colors in OpenCV BGR order: COLORS[0] for person boxes,
# COLORS[1] for the region-of-interest rectangle.
COLORS = [(255, 0, 0), (0, 255, 0)]

def box_overlap_ratio(box1, box2, which_box=1):
    """Return the fraction of one box's area covered by the intersection.

    Parameters
    ----------
    box1, box2 : sequence of 4 numbers
        Axis-aligned rectangles as (x1, y1, x2, y2) with x1 <= x2 and
        y1 <= y2.
    which_box : int, optional
        1 (default) -> ratio relative to box1's area; any other value ->
        relative to box2's area.

    Returns
    -------
    float
        intersection_area / reference_box_area; 0.0 when the boxes do not
        overlap or when the reference box has zero area.
    """
    # Intersection rectangle (empty when the boxes are disjoint).
    x_left = max(box1[0], box2[0])
    y_top = max(box1[1], box2[1])
    x_right = min(box1[2], box2[2])
    y_bottom = min(box1[3], box2[3])

    if x_right < x_left or y_bottom < y_top:
        return 0.0

    intersection_area = (x_right - x_left) * (y_bottom - y_top)

    ref = box1 if which_box == 1 else box2
    box_area = (ref[2] - ref[0]) * (ref[3] - ref[1])

    # Bug fix: a degenerate (zero-area) reference box used to raise
    # ZeroDivisionError when the boxes touched; report 0.0 overlap instead.
    if box_area <= 0:
        return 0.0

    return intersection_area / box_area


def draw_person(image, results):
    """Draw a rectangle around every "person" detection in a YOLO result.

    Parameters
    ----------
    image : np.ndarray
        BGR image (OpenCV convention); the input is not modified.
    results : list
        Ultralytics YOLO results; only ``results[0].boxes`` is consulted.

    Returns
    -------
    np.ndarray
        Copy of *image* with a box drawn for each class-0 detection.
    """
    annotated = image.copy()
    for det in results[0].boxes:
        cls_id = int(det.cls.cpu().numpy()[0])
        # Class 0 is "person" in the COCO label set used by YOLO weights.
        if cls_id != 0:
            print("== ignore other cls ", cls_id)
            continue
        x1, y1, x2, y2 = (int(v) for v in det.xyxy[0].cpu().numpy().tolist())
        cv2.rectangle(annotated, (x1, y1), (x2, y2), COLORS[0], 2)
        # Bug fix: removed the dead `box = mask_img[y1:y2, x1:x2, :]`
        # assignment, which was unused and shadowed the loop variable.
    return annotated

def yolo_detect(v_idx=1):
    """Run YOLO person detection over a video and write an annotated copy.

    Parameters
    ----------
    v_idx : int, optional
        Index used to build the input/output file names (default 1,
        matching the original hard-coded value).

    Side effects
    ------------
    Reads '/home/imvision/hy/samurai-master/data/output%03d.mp4' and writes
    'datas/v%03d_person_out.mp4'.

    Raises
    ------
    IOError
        If the input video cannot be opened.
    """
    model_path = 'models/yolo11m.pt'
    detector = YOLO(model_path)

    input_video = '/home/imvision/hy/samurai-master/data/output%03d.mp4' % v_idx
    reader = cv2.VideoCapture(input_video)
    # Bug fix: fail loudly instead of silently producing an empty output
    # when the input path is wrong.
    if not reader.isOpened():
        raise IOError('cannot open input video: %s' % input_video)

    width = int(reader.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Bug fix: preserve the source frame rate instead of hard-coding 25;
    # fall back to 25 fps when the container does not report one.
    fps = reader.get(cv2.CAP_PROP_FPS) or 25

    output_video = 'datas/v%03d_person_out.mp4' % v_idx
    writer = cv2.VideoWriter(output_video, cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))
    try:
        frame_idx = -1
        while True:
            ok, frame = reader.read()
            if not ok:
                break
            frame_idx += 1
            if frame_idx % 10 == 0:
                print("=== frame finish ", frame_idx)

            results = detector(frame)
            writer.write(draw_person(frame, results))
    finally:
        # Release resources even if detection raises mid-video.
        reader.release()
        writer.release()

def faster_rcnn_detect():
    """Detect persons with Faster R-CNN and save an annotated image.

    Reads 'datas/v000_1.jpg', keeps person detections (COCO label 1,
    score > 0.45) whose box lies mostly (>90%) inside a hard-coded region
    of interest, draws them plus the ROI, and writes './datas/rcnn_v0.jpg'.
    """
    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights="COCO_V1")
    model.eval()

    # Region of interest in pixel coordinates (x1, y1, x2, y2).
    valid_rect = (5, 691, 3706, 1454)
    image = cv2.imread('datas/v000_1.jpg')

    # Bug fix: cv2.imread returns BGR, but torchvision detection models
    # expect RGB input — convert before building the tensor.
    rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    transform = transforms.Compose([transforms.ToTensor()])
    img_tensor = transform(rgb).unsqueeze(0)

    with torch.no_grad():
        predictions = model(img_tensor)
        print("=== predictions ", predictions)

    boxes = predictions[0]['boxes'].cpu().numpy()
    scores = predictions[0]['scores'].cpu().numpy()
    labels = predictions[0]['labels'].cpu().numpy()
    out_img = image.copy()

    valid_person_boxes = []
    color = COLORS[0]
    for box, score, label in zip(boxes, scores, labels):
        # COCO label 1 == "person" for torchvision detection models.
        if label != 1 or score <= 0.45:
            continue
        x1, y1, x2, y2 = (int(v) for v in box)
        p_rect = (x1, y1, x2, y2)
        # Keep the detection only when >90% of its area is inside the ROI.
        if box_overlap_ratio(p_rect, valid_rect) > 0.9:
            cv2.rectangle(out_img, (x1, y1), (x2, y2), color, 2)
            valid_person_boxes.append(p_rect)

    # Draw the ROI itself in the second color for visual reference.
    m_x1, m_y1, m_x2, m_y2 = valid_rect
    cv2.rectangle(out_img, (m_x1, m_y1), (m_x2, m_y2), COLORS[1], 2)
    cv2.imwrite('./datas/rcnn_v0.jpg', out_img)

if __name__ == '__main__':
    # Entry point: runs only the Faster R-CNN image pipeline; swap in
    # yolo_detect() to process video with the YOLO model instead.
    faster_rcnn_detect()
