import cv2
import numpy as np

class Evaluator:
    """Accumulates manual ground-truth boxes, interpolates them across all
    frames, and scores detection records against the interpolated ground truth.

    Boxes are (x, y, w, h) tuples in pixel coordinates.
    """

    def __init__(self):
        self.manual_gt = {}    # frame_num -> (x, y, w, h), manually annotated
        self.auto_gt = {}      # frame_num -> (x, y, w, h), interpolated
        self.detections = []   # dicts: {'frame_num', 'detected', 'box'}

    def add_annotation(self, frame_num, x, y, w, h):
        """Record a manually annotated bounding box for one frame."""
        self.manual_gt[frame_num] = (x, y, w, h)

    def interpolate_gt(self, total_frames):
        """Fill self.auto_gt for frames [0, total_frames) from manual_gt.

        Between consecutive annotations the box is linearly interpolated;
        before the first and after the last annotation the nearest box is
        held constant.
        """
        frames = sorted(self.manual_gt.keys())
        if not frames:
            return
        if len(frames) == 1:
            # Robustness fix: a single annotation previously produced NO
            # ground truth at all (every detection became a false positive).
            # Hold the one known box across the whole clip instead.
            only_box = self.manual_gt[frames[0]]
            self.auto_gt.update({f: only_box for f in range(total_frames)})
            return

        for i in range(len(frames) - 1):
            start, end = frames[i], frames[i + 1]
            sx, sy, sw, sh = self.manual_gt[start]
            ex, ey, ew, eh = self.manual_gt[end]

            # range(start, end) leaves `end` for the next interval (or the
            # tail fill below), so each frame is written exactly once.
            for f in range(start, end):
                ratio = (f - start) / (end - start)
                x = int(sx + (ex - sx) * ratio)
                y = int(sy + (ey - sy) * ratio)
                w = int(sw + (ew - sw) * ratio)
                h = int(sh + (eh - sh) * ratio)
                self.auto_gt[f] = (x, y, w, h)

        # Hold the first/last annotated box constant outside the annotated span.
        first, last = frames[0], frames[-1]
        self.auto_gt.update({f: self.manual_gt[first] for f in range(0, first)})
        self.auto_gt.update({f: self.manual_gt[last] for f in range(last, total_frames)})

    def calculate_metrics(self):
        """Score self.detections against self.auto_gt.

        Returns a dict with:
          avg_error / max_error: center-point distance (px) over true positives,
          fn_rate: misses / total ground-truth frames,
          fp_rate: false positives / all positive detections,
          total_gt: number of ground-truth frames,
          detected: number of detection records processed.
        """
        tp = fp = fn = 0
        errors = []

        for record in self.detections:
            frame_num = record['frame_num']
            detected = record['detected']
            box = record['box']
            gt = self.auto_gt.get(frame_num, None)

            if detected:
                if gt is not None:
                    # Euclidean distance between box centers.
                    dx = (box[0] + box[2] / 2) - (gt[0] + gt[2] / 2)
                    dy = (box[1] + box[3] / 2) - (gt[1] + gt[3] / 2)
                    errors.append(np.sqrt(dx ** 2 + dy ** 2))
                    tp += 1
                else:
                    # Detection on a frame with no ground truth.
                    fp += 1
            else:
                if gt is not None:
                    # Ground truth exists but nothing was detected.
                    fn += 1

        total_gt = len(self.auto_gt)
        fn_rate = fn / total_gt if total_gt > 0 else 0
        fp_rate = fp / (tp + fp) if (tp + fp) > 0 else 0

        return {
            'avg_error': np.mean(errors) if errors else 0,
            'max_error': np.max(errors) if errors else 0,
            'fn_rate': fn_rate,
            'fp_rate': fp_rate,
            'total_gt': total_gt,
            'detected': len(self.detections)
        }

def template_match(template, img):
    """Locate `template` inside `img` by normalized cross-correlation (NCC).

    Uses integral images so per-position mean/variance are O(1); the grid is
    scanned with a stride of 3 pixels to speed up matching, so the returned
    position is accurate to within the stride.

    Args:
        template: BGR image (np.ndarray), smaller than `img`.
        img: BGR search image (np.ndarray).

    Returns:
        (x_max, y_max, max_corr): top-left corner of the best match and its
        NCC score in [-1, 1]; (-1, -1, -1) if nothing scored above -1.
    """
    # Convert both to single-channel float32 for the arithmetic below.
    img_g = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY).astype(np.float32)
    template_g = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY).astype(np.float32)
    H_i, W_i = img_g.shape
    H_t, W_t = template_g.shape
    N = H_t * W_t  # template pixel count, hoisted out of the loops

    # Integral images: shape (H_i+1, W_i+1), so a window sum is 4 lookups.
    integral_img = cv2.integral(img_g)
    integral_sq_img = cv2.integral(img_g ** 2)

    # Template statistics (loop-invariant).
    sum_T = np.sum(template_g)
    sum_T_sq = np.sum(template_g ** 2)
    mean_T = sum_T / N
    var_T = sum_T_sq - sum_T ** 2 / N
    var_T = max(var_T, 1e-10)  # guard against division by zero

    x_max, y_max = -1, -1
    max_corr = -1

    # Scan every valid top-left position with stride 3.
    # BUG FIX: the x range previously stopped at W_i - W_t - 1, which skipped
    # the rightmost valid columns; the correct exclusive bound is W_i - W_t + 1.
    for y in range(0, H_i - H_t + 1, 3):
        for x in range(0, W_i - W_t + 1, 3):
            # Window sum and sum of squares from the integral images.
            sum_patch = (integral_img[y + H_t, x + W_t] - integral_img[y, x + W_t]
                         - integral_img[y + H_t, x] + integral_img[y, x])
            sum_patch_sq = (integral_sq_img[y + H_t, x + W_t] - integral_sq_img[y, x + W_t]
                            - integral_sq_img[y + H_t, x] + integral_sq_img[y, x])

            mean_patch = sum_patch / N
            var_patch = sum_patch_sq - (sum_patch ** 2) / N
            var_patch = max(var_patch, 1e-10)

            # Cross term: dot product of the window and the template.
            patch = img_g[y:y + H_t, x:x + W_t]
            sum_IT = np.sum(patch * template_g)

            # Zero-mean normalized cross-correlation.
            numerator = (sum_IT - mean_patch * sum_T - mean_T * sum_patch
                         + mean_patch * mean_T * N)
            denominator = np.sqrt(var_patch * var_T)
            # BUG FIX: the old guard `corr > max_corr and 1 > corr` discarded
            # exact matches (corr == 1) and scores nudged past 1 by float
            # round-off. Clamp to 1 instead, then keep the maximum.
            corr = min(numerator / denominator, 1.0)

            if corr > max_corr:
                max_corr = corr
                x_max, y_max = x, y

    return x_max, y_max, max_corr

def main():
    """Track a target through the input video with interval-specific templates,
    draw the match and its recent trajectory, write an annotated output video,
    and print evaluation metrics against (optionally hand-annotated) ground truth.
    """
    trajectory = []            # recent match centers, for drawing the track
    evaluator = Evaluator()

    # Template images; each one covers a frame interval (selection logic below).
    template_paths = [
        r"source/m6.png",
        r"source/m3.png",
        r"source/m4.png",
        r"source/m7.png",
        r"source/m9.png",
        r"source/m1.png"
    ]
    templates = []
    for path in template_paths:
        tpl = cv2.imread(path)
        if tpl is None:
            # BUG FIX (robustness): cv2.imread returns None on a missing/unreadable
            # file; the old code then crashed with an opaque AttributeError on
            # .astype. Fail fast with a clear message instead.
            raise FileNotFoundError(f"template image not found: {path}")
        templates.append(tpl.astype(np.float32))

    # Open the input video.
    video_path = r"source/dajiang100.mp4"
    cap = cv2.VideoCapture(video_path)

    # Video properties drive the writer configuration.
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Output writer (XVID/AVI).
    output_path = 'output2.avi'
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    print("开始处理视频...（按Q键可提前结束）")
    frame_count = 0

    while True:
        ret, frame = cap.read()
        if not ret:
            break

        frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_CUBIC)
        frame_count += 1

        # Manual annotation: press 's' to draw a ground-truth box on this frame.
        key = cv2.waitKey(1)
        if key == ord('s'):
            roi = cv2.selectROI('标注', frame, False)
            if roi != (0, 0, 0, 0):
                x, y, w, h = map(int, roi)
                evaluator.add_annotation(frame_count, x, y, w, h)
                print(f"已标注帧 {frame_count}")

        # Only frames 5..143 are tracked; the template switches per interval.
        if 4 < frame_count < 144:
            print(f"处理第 {frame_count} 帧", end='\r')
            if frame_count <= 23:
                selected_template = templates[0]
            elif 23 < frame_count <= 42:
                selected_template = templates[1]
            elif 42 < frame_count <= 77:
                selected_template = templates[2]
            elif 77 < frame_count <= 83:
                selected_template = templates[3]
            elif 83 < frame_count <= 99:
                selected_template = templates[4]
            elif 99 < frame_count <= 160:
                selected_template = templates[5]
            # Run template matching on the current frame.
            x, y, corr = template_match(selected_template, frame.astype(np.float32))

            # Record the detection result for later evaluation.
            if x != -1 and y != -1:
                H_t, W_t = selected_template.shape[:2]
                evaluator.detections.append({
                    'frame_num': frame_count,
                    'detected': True,
                    'box': (x, y, W_t, H_t)
                })

                # Draw the match as an ellipse fitted to the template size.
                center = (x + W_t // 2, y + H_t // 2)
                major_axis = max(W_t, H_t) // 2
                minor_axis = min(W_t, H_t) // 2
                cv2.ellipse(frame, center, (major_axis, minor_axis), 0, 0, 360, (255, 0, 0), 2)

                # Append the center to the trajectory, capped at 50 points.
                trajectory.append(center)
                if len(trajectory) > 50:
                    trajectory.pop(0)

                # Draw the trajectory polyline once there are 2+ points.
                if len(trajectory) >= 2:
                    points = np.array(trajectory, dtype=np.int32)
                    cv2.polylines(frame, [points], False, (255, 0, 0), 2)
            else:
                evaluator.detections.append({
                    'frame_num': frame_count,
                    'detected': False,
                    'box': None
                })

        # Write the (possibly annotated) frame to the output video.
        out.write(frame)

        # Live preview; 'q' quits early.
        cv2.imshow('Processing', frame)
        if cv2.waitKey(1) == ord('q'):
            break

    # Build interpolated ground truth and report metrics.
    evaluator.interpolate_gt(frame_count)
    metrics = evaluator.calculate_metrics()
    print("\n==== 评估结果 ====")
    print(f"定位精度（平均误差）: {metrics['avg_error']:.2f}px")
    print(f"最大误差: {metrics['max_error']:.2f}px")
    print(f"漏检率: {metrics['fn_rate'] * 100:.1f}%")
    print(f"误检率: {metrics['fp_rate'] * 100:.1f}%")

    print("\n视频处理完成！输出保存至:", output_path)

    # Release resources.
    cap.release()
    # BUG FIX: was `out()` — VideoWriter is not callable (TypeError), and the
    # output file was never finalized. release() flushes and closes it.
    out.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
