import os
import numpy as np
from PIL import Image, ImageDraw
from moviepy import VideoFileClip, ImageSequenceClip
import matplotlib.pyplot as plt
from tqdm import tqdm
import cv2  # only used for the live preview window

def to_gray(np_img):
    """Convert an RGB ndarray to grayscale by averaging the channels.

    Arrays that are already 2-D are returned unchanged (same object).
    """
    if np_img.ndim == 3:
        return np_img.mean(axis=2)
    return np_img

def load_templates(template_folder):
    """Load every image in *template_folder* as a half-size grayscale array.

    Only files ending in .png/.jpg/.jpeg (case-insensitive) are read; each
    is converted to RGB, downscaled by 2 in each dimension, and reduced to
    a 2-D grayscale array via to_gray.
    """
    image_exts = ('.png', '.jpg', '.jpeg')
    loaded = []
    for name in os.listdir(template_folder):
        if not name.lower().endswith(image_exts):
            continue
        image = Image.open(os.path.join(template_folder, name)).convert("RGB")
        half = image.resize((image.width // 2, image.height // 2))
        loaded.append(to_gray(np.array(half)))
    return loaded

def ncc_score(patch, template):
    """Normalized cross-correlation of two equal-shaped arrays.

    Result lies in roughly [-1, 1]; a small epsilon in the denominator
    guards against division by zero on constant inputs.
    """
    dev_p = patch - patch.mean()
    dev_t = template - template.mean()
    energy = np.sqrt((dev_p ** 2).sum() * (dev_t ** 2).sum()) + 1e-6
    return (dev_p * dev_t).sum() / energy

def find_best_match(frame_gray, templates, step=8):
    """Exhaustive sliding-window NCC search over all templates.

    Scans *frame_gray* on a grid with stride *step*, scoring every window
    against every template, and returns the (x, y) center of the single
    best-scoring window.  Falls back to (0, 0)'s center if nothing scores
    above -1 (e.g. no template fits in the frame).
    """
    height, width = frame_gray.shape
    best = (-1, (0, 0))  # (score, center)
    for tmpl in templates:
        th, tw = tmpl.shape
        for top in range(0, height - th + 1, step):
            for left in range(0, width - tw + 1, step):
                window = frame_gray[top:top + th, left:left + tw]
                score = ncc_score(window, tmpl)
                if score > best[0]:
                    best = (score, (left + tw // 2, top + th // 2))
    return best[1]

def select_initial_position(frame):
    """Display *frame* and let the user click once to pick the target.

    Returns the clicked (x, y) as a tuple of ints.

    Raises:
        ValueError: if the figure is closed without a click.
    """
    plt.imshow(frame)
    plt.title("点击选择目标位置")
    plt.axis('on')
    clicks = plt.ginput(1)
    plt.close()
    if not clicks:
        raise ValueError("未选择目标位置，请重新运行程序并选择目标。")
    x, y = clicks[0]
    return (int(x), int(y))

def track_video(input_video, template_folder, output_video, skip_last_n=5):
    """Track a template target through a video and write an annotated copy.

    The user clicks the target on a half-size view of the first frame;
    every frame is then matched against the templates at half resolution
    and annotated (trajectory line + dot) at full resolution.  Press 'q'
    in the preview window to stop early; frames processed so far are
    still written out.

    Args:
        input_video: path of the source video file.
        template_folder: folder of template images (see load_templates).
        output_video: path of the annotated video to write.
        skip_last_n: for the last N frames, reuse the previous position
            instead of matching (e.g. when the target leaves the frame).
    """
    templates = load_templates(template_folder)
    clip = VideoFileClip(input_video)
    fps = clip.fps
    # NOTE(review): int(duration * fps) may differ by one from the count
    # iter_frames() actually yields; it only drives tqdm and skip logic.
    total_frames = int(clip.duration * fps)

    # Let the user pick the target on a half-size first frame.
    first_frame = Image.fromarray(clip.get_frame(0)).convert("RGB")
    first_frame_resized = first_frame.resize((first_frame.width // 2, first_frame.height // 2))
    try:
        initial_position = select_initial_position(np.array(first_frame_resized))
    except ValueError as e:
        print(e)
        return

    # BUG FIX: the click was made on the half-size frame, but the
    # trajectory is drawn on full-size frames.  Scale it up by 2 so the
    # first segments (and the hold-position fallback) line up.
    initial_position = (initial_position[0] * 2, initial_position[1] * 2)

    result_frames = []
    trajectory = [initial_position]  # full-resolution (x, y) points

    for frame_idx, frame in tqdm(enumerate(clip.iter_frames()), total=total_frames, desc="Tracking"):
        frame_pil = Image.fromarray(frame).convert("RGB")
        w, h = frame_pil.size
        # Match at half resolution for speed, then scale back up.
        small_frame = frame_pil.resize((w // 2, h // 2))
        frame_gray = to_gray(np.array(small_frame))

        if frame_idx >= total_frames - skip_last_n:
            # Tail of the clip: hold the last known position.
            center = trajectory[-1]
        else:
            center = find_best_match(frame_gray, templates, step=8)
            center = (center[0] * 2, center[1] * 2)

        trajectory.append(center)
        draw = ImageDraw.Draw(frame_pil)

        # Draw only the most recent segments to keep the overlay light.
        if len(trajectory) > 2:
            draw.line(trajectory[-3:], fill=(255, 0, 0), width=2)

        draw.ellipse((center[0]-3, center[1]-3, center[0]+3, center[1]+3), fill=(255, 0, 0))

        frame_np = np.array(frame_pil)
        result_frames.append(frame_np)

        # ===== Live preview (OpenCV expects BGR) =====
        cv2.imshow("Tracking", cv2.cvtColor(frame_np, cv2.COLOR_RGB2BGR))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            print("用户终止了跟踪。")
            break
        # ========================

    cv2.destroyAllWindows()  # close all preview windows

    # Write out whatever was processed (truncated if stopped early).
    result_clip = ImageSequenceClip(result_frames, fps=fps)
    result_clip.write_videofile(output_video, codec='libx264')

# Usage example: track "templates/" targets through video.mp4 and write
# an annotated copy; skip_last_n=0 matches every frame including the tail.
if __name__ == "__main__":
    track_video("video.mp4", "templates", "output/output_tracked.mp4", skip_last_n=0)
