import cv2
import numpy as np

# ========== Algorithm selection ========== #
# 'baseline':    original background-subtraction + IOU matching
# 'tld':         OpenTLD single-target tracking (raw frames)
# 'tld_binary':  OpenTLD tracking on binarized foreground frames
# 'csrt':        CSRT single-target tracking
# 'csrt_binary': CSRT tracking on binarized foreground frames
mode = 'csrt'  # one of 'baseline', 'tld', 'tld_binary', 'csrt', 'csrt_binary'

# Background subtractor; feeds both the '*_binary' modes and the baseline.
background_subtractor = cv2.createBackgroundSubtractorMOG2()

# Open the input video file.
cap = cv2.VideoCapture('./tracking.mp4')  # replace with your own video path
# cap = cv2.VideoCapture('./rc.mp4')  # alternative test clip

# Tracking state: trajectory of target centers and the frame counter.
points = []
frame_index = 0

def crop_roi(frame, roi_w_ratio=0.8, roi_h_ratio=0.8):
    """Return the centered sub-image covering the given width/height ratios.

    With both ratios at 1.0 the frame is returned unchanged; the defaults
    keep the middle 80% in each dimension.
    """
    height, width = frame.shape[:2]
    crop_h = int(height * roi_h_ratio)
    crop_w = int(width * roi_w_ratio)
    top = (height - crop_h) // 2
    left = (width - crop_w) // 2
    return frame[top:top + crop_h, left:left + crop_w]

# Abort early if the video could not be opened.
if not cap.isOpened():
    print("Error: Could not open video.")
    exit()

# Read the first frame; it serves as the template for anchor/car selection.
ret, template_frame = cap.read()
if not ret:
    print("Error: Could not read first frame.")
    exit()
template_frame = crop_roi(template_frame, roi_w_ratio=1.0, roi_h_ratio=1.0)  # ratio 1.0: use the full frame as template
template_gray = cv2.cvtColor(template_frame, cv2.COLOR_BGR2GRAY)

# ========== Click to choose 3 stabilization anchors ==========
anchor_points = []  # [(x, y), ...] clicked anchor centers on the template frame
anchor_patches = []  # [patch, ...] image patches cut around each anchor, used for template matching
anchor_selected = 0  # number of anchors clicked so far
ANCHOR_NUM = 3  # anchors required for the affine stabilization

# Mouse callback for anchor picking.

def select_anchor(event, x, y, flags, param):
    """Record up to ANCHOR_NUM left-clicks as anchor points.

    For each click, stores the point and a 31x31 template patch (clamped to
    the image borders) cut from the template frame around it.
    """
    global anchor_points, anchor_patches, anchor_selected
    if event != cv2.EVENT_LBUTTONDOWN or anchor_selected >= ANCHOR_NUM:
        return
    anchor_points.append((x, y))
    half = 31 // 2
    left = max(0, x - half)
    top = max(0, y - half)
    right = min(template_frame.shape[1], x + half + 1)
    bottom = min(template_frame.shape[0], y + half + 1)
    anchor_patches.append(template_frame[top:bottom, left:right].copy())
    anchor_selected += 1

# Interactive loop: show the template until 3 anchors are clicked, or 'q' quits.
cv2.namedWindow('Select Anchors')
cv2.setMouseCallback('Select Anchors', select_anchor)
while True:
    temp = template_frame.copy()
    # Draw the anchors picked so far as red dots.
    for pt in anchor_points:
        cv2.circle(temp, pt, 5, (0, 0, 255), -1)
    cv2.imshow('Select Anchors', temp)
    # waitKey also pumps the GUI event queue that drives the mouse callback.
    if anchor_selected >= ANCHOR_NUM or cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyWindow('Select Anchors')

# Fall back to three default anchors if the user quit before picking all 3.
if len(anchor_points) < ANCHOR_NUM:
    h, w = template_frame.shape[:2]
    anchor_points = [
        (w // 2, h // 2),
        (w // 4, h // 2),
        (3 * w // 4, h // 2)
    ]
    print(f"未选满3点，自动使用默认点: {anchor_points}")
    patch_size = 31  # odd size so the patch is centered on the anchor
    anchor_patches = []
    for x, y in anchor_points:
        # Clamp the patch window to the image borders.
        x1 = max(0, x - patch_size // 2)
        y1 = max(0, y - patch_size // 2)
        x2 = min(template_frame.shape[1], x + patch_size // 2 + 1)
        y2 = min(template_frame.shape[0], y + patch_size // 2 + 1)
        anchor_patches.append(template_frame[y1:y2, x1:x2].copy())

# Grab stream properties and open the output writers.
fps = cap.get(cv2.CAP_PROP_FPS)
if fps <= 0:
    # Some containers/drivers report 0 FPS, which would produce an
    # unplayable output file; fall back to a sane default.
    fps = 30.0
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
# One annotated-output filename per tracking mode; 'baseline' (and any
# unknown mode) falls back to 'output.mp4'.
_OUTPUT_NAMES = {
    'tld': 'output_tld.mp4',
    'tld_binary': 'output_tld_binary.mp4',
    'csrt': 'output_csrt.mp4',
    'csrt_binary': 'output_csrt_binary.mp4',
}
out = cv2.VideoWriter(_OUTPUT_NAMES.get(mode, 'output.mp4'), fourcc, fps, (width, height))
# Grayscale writer for the thresholded foreground mask.
thresh_out = cv2.VideoWriter('thresh_output.mp4', fourcc, fps, (width, height), isColor=False)

# ========== Mouse drag to select the vehicle ==========
drawing = False
start_point = end_point = None
bbox = None

def draw_rectangle(event, x, y, flags, param):
    """Mouse callback: drag out a rectangle; on release store it as bbox.

    bbox is (x, y, w, h) normalized so width/height are non-negative
    regardless of drag direction.
    """
    global drawing, start_point, end_point, bbox
    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        start_point = end_point = (x, y)
    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            end_point = (x, y)
    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        end_point = (x, y)
        if start_point is not None and end_point is not None:
            (ax, ay), (bx, by) = start_point, end_point
            bbox = (min(ax, bx), min(ay, by), abs(bx - ax), abs(by - ay))

# Interactive loop: drag a rectangle around the car, or press 'q' to skip.
cv2.namedWindow('Select Car')
cv2.setMouseCallback('Select Car', draw_rectangle)
while True:
    temp = template_frame.copy()
    # Live preview of the rectangle being dragged.
    if start_point and end_point:
        cv2.rectangle(temp, start_point, end_point, (0, 255, 0), 2)
    # Keep showing the chosen anchors for reference.
    if anchor_points:
        for pt in anchor_points:
            cv2.circle(temp, pt, 5, (0, 0, 255), -1)
    cv2.imshow('Select Car', temp)
    if bbox or cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.destroyWindow('Select Car')

# Rewind the stream to the second frame (frame 0 was consumed as the template).
cap.set(cv2.CAP_PROP_POS_FRAMES, 1)

# ========== Tracker initialization ==========
# The four original branches were near-identical copy-paste; they are
# consolidated here. Human-readable label per mode, used in error messages.
_TRACKER_LABELS = {
    'tld': 'TLD tracker',
    'tld_binary': 'TLD tracker (binary)',
    'csrt': 'CSRT tracker',
    'csrt_binary': 'CSRT tracker (binary)',
}


def _binarize_bgr(frame_bgr):
    """Return a 3-channel binarized foreground view of a BGR frame.

    Feeds the grayscale frame through the MOG2 background subtractor,
    thresholds the foreground mask at 25, and converts back to BGR so
    trackers expecting color input can consume it. Note: calling this
    advances the background-subtractor model state.
    """
    gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
    fg = background_subtractor.apply(gray)
    _, binary = cv2.threshold(fg, 25, 255, cv2.THRESH_BINARY)
    return cv2.cvtColor(binary, cv2.COLOR_GRAY2BGR)


tld_tracker = None
csrt_tracker = None
if bbox is not None and mode in _TRACKER_LABELS:
    label = _TRACKER_LABELS[mode]
    try:
        # Both tracker families live in cv2.legacy on OpenCV >= 4.5.
        if mode.startswith('tld'):
            tracker = cv2.legacy.TrackerTLD_create()
        else:
            tracker = cv2.legacy.TrackerCSRT_create()
        # '*_binary' modes are initialized on the thresholded template,
        # matching what they will see during tracking.
        init_frame = _binarize_bgr(template_frame) if mode.endswith('_binary') else template_frame
        ok = tracker.init(init_frame, bbox)
        if not ok:
            print(f"{label} initialization failed!")
            exit()
    except Exception as e:
        print(f"{label} not available or failed to initialize:", e)
        exit()
    if mode.startswith('tld'):
        tld_tracker = tracker
    else:
        csrt_tracker = tracker

# Main processing loop: stabilize, track, draw, and record each frame.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # ========== Frame stabilization: 3-point affine compensation ==========
    if len(anchor_points) == ANCHOR_NUM and len(anchor_patches) == ANCHOR_NUM:
        src_pts = np.float32(anchor_points).reshape(-1, 2)
        dst_pts = []
        valid = True
        for i in range(ANCHOR_NUM):
            # Locate each anchor patch in the current (unwarped) frame.
            res = cv2.matchTemplate(frame, anchor_patches[i], cv2.TM_CCOEFF_NORMED)
            min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(res)
            patch_h, patch_w = anchor_patches[i].shape[:2]
            # matchTemplate returns the patch's top-left; shift to its center.
            curr_pt = (max_loc[0] + patch_w // 2, max_loc[1] + patch_h // 2)
            dst_pts.append(curr_pt)
            # Refresh the template patch when the match is confident and the
            # displacement is plausible (adapts to slow appearance changes).
            if max_val > 0.92 and np.linalg.norm(np.array(curr_pt) - np.array(anchor_points[i])) < 20:
                x1 = max(0, curr_pt[0] - patch_w // 2)
                y1 = max(0, curr_pt[1] - patch_h // 2)
                x2 = min(frame.shape[1], curr_pt[0] + patch_w // 2 + 1)
                y2 = min(frame.shape[0], curr_pt[1] + patch_h // 2 + 1)
                anchor_patches[i] = frame[y1:y2, x1:x2].copy()
            # Skip stabilization for this frame if any match is too weak.
            if max_val < 0.7:
                valid = False
        dst_pts = np.float32(dst_pts).reshape(-1, 2)
        if valid:
            # Warp the frame so the anchors land back on their template positions.
            M, _ = cv2.estimateAffinePartial2D(dst_pts, src_pts, method=cv2.RANSAC)
            if M is not None:
                frame = cv2.warpAffine(frame, M, (frame.shape[1], frame.shape[0]))

    show_frame = frame.copy()
    # Ratio 1.0 -> no-op crop; kept so a tighter ROI can be re-enabled easily.
    frame = crop_roi(frame, roi_w_ratio=1.0, roi_h_ratio=1.0)
    show_frame = crop_roi(show_frame, roi_w_ratio=1.0, roi_h_ratio=1.0)

    # Draw the 3 anchors on the display frame.
    for pt in anchor_points:
        cv2.circle(show_frame, pt, 5, (0, 0, 255), -1)

    if mode == 'tld' and tld_tracker is not None:
        # TLD on the raw color frame.
        ok, tld_bbox = tld_tracker.update(frame)
        if ok:
            x, y, w, h = [int(v) for v in tld_bbox]
            center = (x + w // 2, y + h // 2)
            points.append(center)
            cv2.rectangle(show_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # No foreground mask in this mode; use blanks so the display/writer code works.
        fg_mask = np.zeros(frame.shape[:2], dtype=np.uint8)
        thresh = np.zeros(frame.shape[:2], dtype=np.uint8)
    elif mode == 'tld_binary' and tld_tracker is not None:
        # TLD on the binarized foreground view.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        fg_mask = background_subtractor.apply(gray)
        _, thresh = cv2.threshold(fg_mask, 25, 255, cv2.THRESH_BINARY)
        thresh_color = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
        ok, tld_bbox = tld_tracker.update(thresh_color)
        if ok:
            x, y, w, h = [int(v) for v in tld_bbox]
            center = (x + w // 2, y + h // 2)
            points.append(center)
            cv2.rectangle(show_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    elif mode == 'csrt' and csrt_tracker is not None:
        # CSRT on the raw color frame.
        ok, csrt_bbox = csrt_tracker.update(frame)
        if ok:
            x, y, w, h = [int(v) for v in csrt_bbox]
            center = (x + w // 2, y + h // 2)
            points.append(center)
            cv2.rectangle(show_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        # No foreground mask in this mode; use blanks so the display/writer code works.
        fg_mask = np.zeros(frame.shape[:2], dtype=np.uint8)
        thresh = np.zeros(frame.shape[:2], dtype=np.uint8)
    elif mode == 'csrt_binary' and csrt_tracker is not None:
        # CSRT on the binarized foreground view.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        fg_mask = background_subtractor.apply(gray)
        _, thresh = cv2.threshold(fg_mask, 25, 255, cv2.THRESH_BINARY)
        thresh_color = cv2.cvtColor(thresh, cv2.COLOR_GRAY2BGR)
        ok, csrt_bbox = csrt_tracker.update(thresh_color)
        if ok:
            x, y, w, h = [int(v) for v in csrt_bbox]
            center = (x + w // 2, y + h // 2)
            points.append(center)
            cv2.rectangle(show_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
    else:
        # Baseline: background subtraction + best-IOU contour against the
        # user-selected bbox. Also the fallback when no tracker was created.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        fg_mask = background_subtractor.apply(gray)
        _, thresh = cv2.threshold(fg_mask, 25, 255, cv2.THRESH_BINARY)
        contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        target_center = None
        max_iou = 0
        if bbox and contours:
            bx, by, bw, bh = bbox
            for contour in contours:
                # Ignore tiny noise blobs.
                if cv2.contourArea(contour) > 100:
                    x, y, w, h = cv2.boundingRect(contour)
                    # Intersection-over-union of the contour's bounding box
                    # against the user-selected bbox.
                    xx1 = max(bx, x)
                    yy1 = max(by, y)
                    xx2 = min(bx + bw, x + w)
                    yy2 = min(by + bh, y + h)
                    inter_area = max(0, xx2 - xx1) * max(0, yy2 - yy1)
                    bbox_area = bw * bh
                    contour_area = w * h
                    union_area = bbox_area + contour_area - inter_area
                    iou = inter_area / union_area if union_area > 0 else 0
                    if iou > max_iou:
                        max_iou = iou
                        # Centroid from image moments (m00 == 0 means degenerate contour).
                        M = cv2.moments(contour)
                        if M["m00"] != 0:
                            center_x = int(M["m10"] / M["m00"])
                            center_y = int(M["m01"] / M["m00"])
                            target_center = (center_x, center_y)
                            target_rect = (x, y, w, h)
            if target_center:
                points.append(target_center)
                x, y, w, h = target_rect
                cv2.rectangle(show_frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Draw the trajectory; skip segments with implausibly large jumps.
    for i in range(1, len(points)):
        if abs(points[i][0] - points[i - 1][0]) < 50 and abs(points[i][1] - points[i - 1][1]) < 50:
            cv2.line(show_frame, points[i - 1], points[i], (0, 0, 255), 2)

    # Show the annotated color frame stacked above the FG mask.
    frame_show = show_frame.copy()
    fg_mask_color = cv2.cvtColor(fg_mask, cv2.COLOR_GRAY2BGR)
    if mode == 'tld_binary':
        win_name = 'Result_TLD_Binary'
    elif mode == 'tld':
        win_name = 'Result_TLD'
    elif mode == 'csrt':
        win_name = 'Result_CSRT'
    elif mode == 'csrt_binary':
        win_name = 'Result_CSRT_Binary'
    else:
        win_name = 'Result'
    concat_img = np.vstack([frame_show, fg_mask_color])
    cv2.imshow(win_name, concat_img)

    # Write the annotated frame and the (resized) threshold mask to disk.
    out.write(show_frame)
    thresh_resized = cv2.resize(thresh, (width, height))
    thresh_out.write(thresh_resized)

    # 'q' aborts playback early.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    frame_index += 1

# Release all resources.
cap.release()
out.release()
thresh_out.release()
cv2.destroyAllWindows()
