import cv2
import torch
import torch.nn.functional as F

# ===================== CONFIG =====================
TEMPLATE_UPDATE_THRESHOLD = 0.7  # template is refreshed only when match confidence exceeds this
VIDEO_PATH = "riding man.mp4"  # input video path
TEMPLATE_PATH = "riding man target.png"  # image that contains the target template
NCC_THRESHOLD = 0.9  # minimum NCC score required to accept a match
MAX_FRAMES = 200  # process at most this many frames
TRACK_WINDOW = 100  # half-size of the local search window around the last position
FULL_SEARCH_FRAMES = 200  # frames that use whole-frame search before switching to local tracking
DEBUG = True  # print per-frame best-score debug info
USE_GPU = True  # NOTE(review): currently unused — device choice follows torch.cuda.is_available()


# ===================== UTILS =====================
def resize_template(template, scale):
    """Resize a grayscale template by *scale* for multi-scale matching.

    The output size is clamped to at least 1x1: for very small scales
    ``int(w * scale)`` would otherwise truncate to 0 and ``cv2.resize``
    raises on zero dimensions.

    Args:
        template: 2-D (grayscale) image array.
        scale: positive scale factor.

    Returns:
        The resized template (INTER_AREA interpolation, good for shrinking).
    """
    h, w = template.shape
    new_w = max(1, int(w * scale))
    new_h = max(1, int(h * scale))
    return cv2.resize(template, (new_w, new_h), interpolation=cv2.INTER_AREA)


def compute_ncc_map_gpu(image, template, batch_cols=64):
    """Compute the normalized cross-correlation (NCC) map with PyTorch.

    Slides *template* over *image* and returns a score map of shape
    ``(ih - th + 1, iw - tw + 1)`` where 1.0 means a perfect match.
    Work is batched over column windows to bound peak memory of
    ``F.unfold``.

    Args:
        image: 2-D uint8 grayscale frame (numpy array).
        template: 2-D uint8 grayscale template, strictly smaller than image.
        batch_cols: number of output columns processed per batch.

    Returns:
        numpy float32 array of NCC scores in roughly [-1, 1].
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    image_t = torch.as_tensor(image, dtype=torch.float32, device=device).unsqueeze(0).unsqueeze(0) / 255.0
    templ_t = torch.as_tensor(template, dtype=torch.float32, device=device) / 255.0

    ih, iw = image.shape
    th, tw = template.shape
    oh, ow = ih - th + 1, iw - tw + 1
    n = th * tw  # pixels per patch

    t_flat = templ_t.reshape(1, -1)
    t_mean = t_flat.mean()
    # Population std (unbiased=False): the score is normalized by n below,
    # so the unbiased (n-1) estimator would cap a perfect match at (n-1)/n
    # instead of 1.0 and bias every comparison against NCC_THRESHOLD.
    t_std = t_flat.std(unbiased=False) + 1e-5  # epsilon guards flat templates
    t_norm = (t_flat - t_mean) / t_std

    ncc_map = torch.full((oh, ow), -1.0, device=device)

    for col_start in range(0, ow, batch_cols):
        col_end = min(col_start + batch_cols, ow)
        patch_cols = col_end - col_start

        # Crop just enough columns so unfold yields patch_cols positions per row.
        crop = image_t[:, :, :, col_start:col_end + tw - 1]
        patches = F.unfold(crop, kernel_size=(th, tw))  # (1, n, L), L = oh*patch_cols
        # Explicit dim: a bare .squeeze() would also drop L when L == 1.
        patches = patches.squeeze(0).T  # (L, n), row-major over (row, col)

        patch_mean = patches.mean(dim=1, keepdim=True)
        patch_std = patches.std(dim=1, keepdim=True, unbiased=False) + 1e-5
        patches_norm = (patches - patch_mean) / patch_std

        ncc = torch.matmul(patches_norm, t_norm.T).squeeze(1) / n
        ncc_map[:, col_start:col_end] = ncc.view(oh, patch_cols)

    return ncc_map.detach().cpu().numpy()

def search_best_match(frame, template_orig, last_pos=None, debug_prefix="frame", current_scale=1.0):
    """Find the best NCC match of the template in *frame*.

    On the first detection (``last_pos is None``) the whole frame is
    searched at scale 1.0; afterwards only ``current_scale`` is used and
    the search is restricted to a window around the previous position.

    Args:
        frame: 2-D grayscale frame.
        template_orig: 2-D grayscale template.
        last_pos: (cx, cy) center from the previous frame, or None.
        debug_prefix: unused label kept for interface compatibility.
        current_scale: scale carried over from the previous match.

    Returns:
        (best_loc, best_scale, best_score) where best_loc is the (x, y)
        top-left corner of the match or None if nothing was evaluated.
    """
    best_score = -1.0
    best_loc = None
    best_scale = 1.0
    fh, fw = frame.shape

    # First match forces scale 1.0; afterwards reuse the last scale.
    # (The original had a dead nested `last_pos is None` re-check here.)
    search_scales = [1.0] if last_pos is None else [current_scale]

    for scale in search_scales:
        template = resize_template(template_orig, scale)
        th, tw = template.shape

        if th >= fh or tw >= fw:
            print(f"[WARNING] Template too large at scale {scale}: ({tw}x{th}) vs frame ({fw}x{fh})")
            continue

        ncc_map = compute_ncc_map_gpu(frame, template)

        if last_pos is not None:
            # NOTE(review): last_pos is a *center* while ncc_map coordinates
            # are top-left corners, so the window is shifted by ~half a
            # template; preserved as-is to keep behavior unchanged.
            cx, cy = last_pos
            xmin = max(cx - TRACK_WINDOW, 0)
            xmax = min(cx + TRACK_WINDOW, ncc_map.shape[1])
            ymin = max(cy - TRACK_WINDOW, 0)
            ymax = min(cy + TRACK_WINDOW, ncc_map.shape[0])
            ncc_map_crop = ncc_map[ymin:ymax, xmin:xmax]
            if ncc_map_crop.size == 0:
                # Window fell entirely outside the score map; cv2.minMaxLoc
                # would raise on an empty array.
                continue
            _, max_val, _, max_loc = cv2.minMaxLoc(ncc_map_crop)
            match_x = max_loc[0] + xmin
            match_y = max_loc[1] + ymin
        else:
            _, max_val, _, max_loc = cv2.minMaxLoc(ncc_map)
            match_x, match_y = max_loc

        max_val = float(max_val)  # applied uniformly (original only converted in one branch)
        if max_val > best_score:
            best_score = max_val
            best_loc = (match_x, match_y)
            best_scale = scale

    if DEBUG:
        print(f"[DEBUG] Best Score: {best_score:.4f} at {best_loc} scale={best_scale}")

    return best_loc, best_scale, best_score

# ===================== MAIN TRACKING =====================
def track_vehicle(video_path, template_path):
    """Main pipeline: load video and template, match the target frame by
    frame with NCC, and draw the trajectory.

    Args:
        video_path: path to the input video.
        template_path: path to the image containing the target template.

    Returns:
        List of (x, y) center points, one per successfully matched frame.

    Raises:
        FileNotFoundError: if the template image cannot be read.
        IOError: if the video cannot be opened.
    """
    template_full = cv2.imread(template_path, cv2.IMREAD_GRAYSCALE)
    if template_full is None:
        raise FileNotFoundError("模板图像加载失败，请检查路径！")

    # Hard-coded crop of the target region inside the template image.
    crop_x, crop_y, crop_w, crop_h = 725, 430, 25, 30
    template_orig = template_full[crop_y:crop_y + crop_h, crop_x:crop_x + crop_w]

    cv2.imwrite("debug_template.png", template_orig)  # save cropped template for inspection
    print(f"[INFO] 模板图尺寸: {template_orig.shape[::-1]}")

    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        raise IOError("视频加载失败，请检查路径或格式！")

    trajectory = []
    last_pos = None
    last_scale = 1.0
    frame_id = 0

    # try/finally guarantees the capture and windows are released even if
    # matching raises mid-loop (the original leaked them on exceptions).
    try:
        while frame_id < MAX_FRAMES:
            ret, frame = cap.read()
            if not ret:
                break

            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

            if frame_id == 0:
                cv2.imwrite("frame_0000.png", gray)  # save first frame for debugging
                print(f"[INFO] 首帧尺寸: {gray.shape[::-1]}")

            # Early frames use a whole-frame search; later frames track locally.
            pos = None if frame_id < FULL_SEARCH_FRAMES else last_pos
            match_loc, scale, score = search_best_match(
                gray, template_orig, pos,
                debug_prefix=f"frame_{frame_id:04d}", current_scale=last_scale)

            if match_loc is not None and score > NCC_THRESHOLD:
                x, y = match_loc
                th, tw = resize_template(template_orig, scale).shape
                center = (x + tw // 2, y + th // 2)
                trajectory.append(center)

                # Dynamically refresh the template on high confidence.
                # NOTE(review): TEMPLATE_UPDATE_THRESHOLD (0.7) < NCC_THRESHOLD
                # (0.9), so this inner check is currently always true; kept for
                # configuration flexibility.
                if score > TEMPLATE_UPDATE_THRESHOLD:
                    template_orig = gray[y:y + th, x:x + tw].copy()

                last_pos = center
                last_scale = scale
                cv2.circle(frame, center, max(th, tw) // 2, (255, 0, 0), 2)  # blue marker
            else:
                # Target lost: fall back to full search next frame.
                # (The original assigned last_pos = None twice on this path.)
                last_pos = None

            # Always draw the accumulated trajectory.
            for i in range(1, len(trajectory)):
                cv2.line(frame, trajectory[i - 1], trajectory[i], (255, 0, 0), 2)

            cv2.imshow("Tracking", frame)  # live preview
            if cv2.waitKey(1) & 0xFF == ord('q'):  # press 'q' to quit
                break

            frame_id += 1
    finally:
        cap.release()
        cv2.destroyAllWindows()

    print(f"跟踪完成，共跟踪帧数: {len(trajectory)}")
    return trajectory

# ===================== RUN =====================
if __name__ == "__main__":
    # Entry point: run the tracker with the module-level config paths.
    track_vehicle(VIDEO_PATH, TEMPLATE_PATH)
