import cv2
import numpy as np
import ncc  # 导入我们的优化版归一化互相关算法
import time
# Module-level state shared with the HighGUI mouse callback below.
drawing = False  # True while the left mouse button is held down
ix, iy = -1, -1  # drag start corner of the ROI being drawn
roi = None  # last confirmed ROI as (x, y, w, h), or None

# Mouse callback used to select a rectangular ROI on the displayed frame.
def drawLine(event, x, y, flags, param):
    """HighGUI mouse callback: drag with the left button to draw an ROI.

    On button release, stores the selection in the module-level ``roi``
    as (x, y, w, h) and draws the final rectangle onto the global
    ``image`` shown in the "video" window.
    NOTE(review): dragging up/left yields negative w/h; the caller guards
    against that with a w > 0 and h > 0 check.
    """
    global ix, iy, drawing, roi

    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        ix, iy = x, y

    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            # Draw the live preview rectangle on a copy so the base frame
            # is not permanently marked while dragging.
            temp_img = image.copy()
            cv2.rectangle(temp_img, (ix, iy), (x, y), (255, 255, 0), 2)
            cv2.imshow("video", temp_img)

    elif event == cv2.EVENT_LBUTTONUP:
        drawing = False
        roi = (ix, iy, x - ix, y - iy)  # record the ROI as (x, y, w, h)
        cv2.rectangle(image, (ix, iy), (x, y), (0, 255, 0), 2)
        cv2.imshow("video", image)

# Interactive ROI selection over the first few frames of a video.
def select_rois_from_video(video_path, num_frames=5):
    """Collect grayscale ROI templates from the first *num_frames* frames.

    For each frame the user drags a rectangle with the mouse, then presses
    Enter to confirm (the ROI is cropped and converted to grayscale and
    appended to the result) or ESC to skip the frame.

    Returns the list of collected grayscale template images.
    """
    global image, roi
    cap = cv2.VideoCapture(video_path)
    templates = []
    frame_id = 0

    print(f"[提示] 将展示前 {num_frames} 帧用于选择 ROI，按 Enter 确认，ESC 跳过该帧。")

    while frame_id < num_frames:
        ret, frame = cap.read()
        if not ret:
            break

        image = frame.copy()
        # Bug fix: clear the selection left over from the previous frame so
        # that pressing Enter without drawing cannot reuse stale coordinates
        # cropped from the wrong frame.
        roi = None
        cv2.namedWindow("video")
        cv2.setMouseCallback("video", drawLine)  # register the mouse handler

        cv2.imshow("video", image)

        # Block until a key is pressed.
        key = cv2.waitKey(0) & 0xFF

        if key == 27:  # ESC: skip this frame
            cv2.destroyAllWindows()
            frame_id += 1
            continue
        elif key == 13 and roi is not None:  # Enter: confirm the current ROI
            x, y, w, h = roi
            # Guard against a degenerate (or up/left-dragged) rectangle.
            if w > 0 and h > 0:
                roi_img = cv2.cvtColor(frame[y:y + h, x:x + w], cv2.COLOR_BGR2GRAY)
                templates.append(roi_img)
            cv2.destroyAllWindows()
            frame_id += 1
        else:
            # No ROI was selected on this frame; move on.
            cv2.destroyAllWindows()
            frame_id += 1

    cap.release()
    return templates

def add_template(template_pool, new_template, max_templates=5):
    """Append *new_template* to the pool in place.

    Evicts the oldest entries while the pool exceeds *max_templates*,
    then returns the same (mutated) list object.
    """
    template_pool.append(new_template)
    while len(template_pool) > max_templates:
        del template_pool[0]
    return template_pool

def create_kalman():
    """Build a constant-velocity Kalman filter.

    State is [x, y, vx, vy]; measurements are 2-D positions.  Noise
    covariances match the original tuning (process 0.03, measurement 0.5).
    """
    kf = cv2.KalmanFilter(4, 2)
    # Only position is observed.
    kf.measurementMatrix = np.float32([[1, 0, 0, 0],
                                       [0, 1, 0, 0]])
    # Unit-timestep constant-velocity model: position += velocity.
    kf.transitionMatrix = np.float32([[1, 0, 1, 0],
                                      [0, 1, 0, 1],
                                      [0, 0, 1, 0],
                                      [0, 0, 0, 1]])
    kf.processNoiseCov = (np.identity(4) * 0.03).astype(np.float32)
    kf.measurementNoiseCov = (np.identity(2) * 0.5).astype(np.float32)
    return kf

def template_matching(image, template):
    """Run normalized cross-correlation matching of *template* over *image*.

    Dispatches to the implementation in the project ``ncc`` module best
    suited to the template size and returns its score map.
    """
    rows, cols = template.shape
    # Templates above 10000 pixels take the fast large-template path.
    if rows * cols > 10000:
        return ncc.fast_tm_ccoeff_normed(image, template)
    # Small and medium templates use the NumPy-optimized variant.
    return ncc.tm_ccoeff_normed_numpy_optimized(image, template)

def find_max_location(result):
    """Return (max_value, max_location) of a match-score map."""
    _, peak_val, _, peak_loc = cv2.minMaxLoc(result)
    return peak_val, peak_loc

def track_with_templates_and_kalman(
    video_path,
    templates,
    output_path,
    threshold=0.5,
    update_thresh=0.85,
    max_templates=5,
    lost_frames_threshold=5,  # consecutive lost frames before the target is declared gone
    position_change_threshold=100,  # max allowed per-frame position jump (pixels)
    score_drop_threshold=0.25  # max allowed per-frame drop in match score
):
    """Track a target through a video with multi-template NCC + Kalman.

    Every frame is matched against all templates; the best match is
    accepted if it clears the score threshold and passes position-jump /
    score-drop sanity checks.  Accepted matches feed the Kalman filter and
    may refresh the template pool; rejected frames fall back to the Kalman
    prediction.  Annotated frames are written to *output_path* as mp4.

    Raises ValueError if *templates* is empty.
    """
    cap = cv2.VideoCapture(video_path)
    fps = cap.get(cv2.CAP_PROP_FPS)
    frames = []
    trajectory = []
    occluded_flags = []

    if not templates:
        raise ValueError("没有有效模板")

    # All templates are assumed to share the first template's size —
    # TODO confirm, since enhance_templates keeps originals of any size.
    th, tw = templates[0].shape
    kalman = create_kalman()
    initialized = False
    
    # Total frame count, used only for progress reporting.
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    frame_count = 0
    
    # Number of consecutive frames without an accepted match.
    consecutive_lost_frames = 0
    # True while the target is considered present, False once lost.
    target_present = True
    
    # Best match score/location from the previous frame (sanity checks).
    prev_score = None
    prev_loc = None
    last_valid_center = None
    
    # Dynamic score threshold (currently never adjusted after init).
    dynamic_threshold = threshold
    
    while True:
        ret, frame = cap.read()
        if not ret:
            break
            
        frame_count += 1

        if frame_count % 10 == 0:
            print(f"处理帧: {frame_count}/{total_frames} ({frame_count/total_frames*100:.1f}%)")
            
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        best_score = -np.inf
        best_loc = None

        # Match every template and keep the best-scoring location.
        for template in templates:
            res = template_matching(frame_gray, template)
            
            # Peak of the score map = best candidate position.
            max_val, max_loc = find_max_location(res)
            
            if max_val > best_score:
                best_score = max_val
                best_loc = max_loc
        
        # Accept the match only if it clears the (dynamic) threshold.
        matched = best_score >= dynamic_threshold
        
        # Detect sudden position jumps or score drops — both usually mean
        # a false match on a similar-looking structure.
        position_jump = False
        score_drop = False
        
        if prev_loc is not None and best_loc is not None:
            # Distance between the current and previous best positions.
            dx = best_loc[0] - prev_loc[0]
            dy = best_loc[1] - prev_loc[1]
            distance = np.sqrt(dx*dx + dy*dy)
            
            # A large jump suggests a mismatch to a similar structure.
            if distance > position_change_threshold:
                position_jump = True
                print(f"位置突变: {distance:.1f}像素，可能是误匹配")
        
        if prev_score is not None and matched:
            # Did the match score fall sharply since the last frame?
            score_change = prev_score - best_score
            if score_change > score_drop_threshold:
                score_drop = True
                print(f"分数下降: {score_change:.2f}，从{prev_score:.2f}到{best_score:.2f}")
        
        # Reject the match if either sanity check fired.
        if matched and (position_jump or score_drop):
            print("检测到可能的误匹配，忽略此匹配结果")
            matched = False  # reject the suspicious match
        
        if matched:
            center = (best_loc[0] + tw // 2, best_loc[1] + th // 2)
            if not initialized:
                # First accepted match seeds the filter state at rest.
                kalman.statePre = np.array([[center[0]], [center[1]], [0], [0]], dtype=np.float32)
                kalman.statePost = kalman.statePre.copy()
                initialized = True
                last_valid_center = center
            else:
                # Remember the last confirmed target center.
                last_valid_center = center

            measurement = np.array([[np.float32(center[0])], [np.float32(center[1])]])
            kalman.correct(measurement)
            occluded_flags.append(False)
            
            # Record current location/score for the next frame's checks.
            prev_loc = best_loc
            prev_score = best_score
            
            # A confirmed match resets the lost-frame counter.
            consecutive_lost_frames = 0
            target_present = True

            # Automatic template update — only on high-confidence matches.
            if best_score >= update_thresh and not position_jump:
                patch = frame_gray[best_loc[1]:best_loc[1] + th, best_loc[0]:best_loc[0] + tw]
                if patch.shape == (th, tw):
                    templates = add_template(templates, patch, max_templates)
        else:
            # One more frame without an accepted match.
            consecutive_lost_frames += 1
            
            # Too many lost frames in a row: declare the target gone and
            # forget the score/position history.
            if consecutive_lost_frames > lost_frames_threshold:
                target_present = False
                prev_score = None
                prev_loc = None
            
            # NOTE(review): predict() is also called unconditionally below,
            # so lost frames advance the filter state twice — verify intent.
            predicted = kalman.predict()
            center = (int(predicted[0]), int(predicted[1]))
            occluded_flags.append(True)

        predicted = kalman.predict()
        center = (int(predicted[0]), int(predicted[1]))
        trajectory.append(center)
        result_frame = frame.copy()
        # NOTE(review): annotations are hard-limited to the first 150
        # frames — looks like a demo constraint; confirm before reuse.
        if(frame_count<=150):
            # Draw tracking overlays only while the target is present.
            if target_present:
                # Draw the target box.
                if not occluded_flags[-1]:  # target matched this frame
                    cv2.rectangle(result_frame, best_loc, (best_loc[0] + tw, best_loc[1] + th), (0, 255, 255), 2)
                    # Overlay the match score above the box.
                    score_text = f"{best_score:.2f}"
                    cv2.putText(result_frame, score_text, (best_loc[0], best_loc[1] - 5),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0), 1)
                elif consecutive_lost_frames <= lost_frames_threshold:
                    # Target briefly lost but still tracked: draw the
                    # Kalman-predicted position instead.
                    pred_x, pred_y = center[0] - tw // 2, center[1] - th // 2
                    cv2.rectangle(result_frame, (pred_x, pred_y), (pred_x + tw, pred_y + th), (0, 165, 255), 2)  # orange box marks a predicted position
                    cv2.putText(result_frame, "Predicted", (pred_x, pred_y - 5),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 165, 255), 1)
                
                # Draw the current (predicted) center point.
                cv2.circle(result_frame, center, 4, (0, 255, 255), -1)
            
            # Draw the recent trajectory (the last 30 points at most).
            if len(trajectory) > 1:
                display_trajectory_length = min(30, len(trajectory))
                start_idx = max(0, len(trajectory) - display_trajectory_length)
                
                for i in range(start_idx + 1, len(trajectory)):
                    if i <= len(occluded_flags):
                        # NOTE(review): both ternary branches are the same
                        # color, so occlusion is not visually distinguished.
                        color = (0, 255, 255) if occluded_flags[i-1] else (0, 255, 255)
                        cv2.line(result_frame, trajectory[i - 1], trajectory[i], color, 2)
        
        # Overlay the target status in the top-left corner.
        status_text = "Target: Detected" if target_present and frame_count<=150 else "Target: Lost"
        cv2.putText(result_frame, status_text, (10, 30), 
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0) if target_present else (0, 0, 255), 2)
        
        frames.append(result_frame)

    cap.release()

    # Re-encode the annotated frames to an mp4 file.
    if frames:
        h, w, _ = frames[0].shape
        out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
        for frame in frames:
            out.write(frame)
        out.release()
        print(f"[输出] 视频已保存至：{output_path}")
    else:
        print("[警告] 没有生成帧")

def enhance_templates(templates):
    """Return a template list with a contrast-enhanced copy of each input.

    Each template contributes two entries: a blurred + CLAHE-equalized
    version followed by the untouched original, doubling the pool size.
    """
    # CLAHE operator is stateless per apply(); build it once up front.
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    enhanced_templates = []
    for tpl in templates:
        # Light Gaussian blur to suppress noise before equalization.
        smoothed = cv2.GaussianBlur(tpl, (3, 3), 0)
        enhanced_templates.append(clahe.apply(smoothed))
        # Keep the original as well for template diversity.
        enhanced_templates.append(tpl)
    return enhanced_templates

# === Example entry point ===
if __name__ == "__main__":
    # Input video and output path for the annotated result.
    video_path = "大疆无人机航拍骑车人.mp4"
    output_path = "output/1_optimized_ncc_tracking.mp4"

    print("======== 高性能模板跟踪系统 ========")
    print("使用优化的NumPy实现归一化互相关算法")
    print("====================================")
    
    # Interactively pick ROI templates from the first 10 frames.
    templates = select_rois_from_video(video_path, num_frames=10)
    #image = cv2.imread('templates/1.png')
    #templates.append(image)
    
    # Enhance template quality (blur + CLAHE), doubling the pool.
    enhanced_templates = enhance_templates(templates)
    
    print(f"[信息] 已选择 {len(templates)} 个原始模板，增强后共有 {len(enhanced_templates)} 个模板")


    start_time = time.time()  # wall-clock start



    # Run tracking using the optimized normalized cross-correlation.
    track_with_templates_and_kalman(
        video_path, 
        enhanced_templates, 
        output_path,
        threshold=0.4,                # match acceptance threshold
        update_thresh=0.6,            # template update threshold
        max_templates=8,              # maximum template pool size
        lost_frames_threshold=3,      # lowered to 3 so the box vanishes sooner
        position_change_threshold=80, # position jump threshold (pixels)
        score_drop_threshold=0.2      # score drop threshold
    )
    
    end_time = time.time()  # wall-clock end

    elapsed_time = end_time - start_time  # total elapsed seconds
    print(f"运行时间：{elapsed_time:.4f} 秒")