import cv2
import numpy as np
from tqdm import tqdm

'''Dynamic meme/sticker tracking via multi-scale NCC template matching.'''

# Scale factors tried for every template on every frame (multi-scale search).
custom_scales = [0.3, 0.4, 0.5, 0.8, 0.9, 1.1, 1.3]
# Module-level history of matched box centers, used to draw the motion trail.
trace_list = []


def load_image(filename):
    """Load an image from *filename* as a grayscale array.

    Args:
        filename: path of the image file.

    Returns:
        2-D uint8 grayscale image.

    Raises:
        ValueError: if OpenCV cannot read the file.
    """
    img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    if img is None:
        # Interpolate the actual path so the failure is diagnosable
        # (the f-string previously contained a literal placeholder).
        raise ValueError(f"无法加载图像文件：{filename}")
    return img


def compute_mean(data):
    """Return the mean of all elements of a 2-D array."""
    total = np.sum(data)
    return total / data.size


def compute_stddev(data, mean):
    """Return the population standard deviation of a 2-D array, given its mean."""
    variance = np.sum((data - mean) ** 2) / data.size
    return np.sqrt(variance)


def ncc_matching_optimized(image, template):
    """Two-stage NCC template matching accelerated with integral images.

    A coarse grid scan (step 25 px) over the whole image locates the
    approximate best match, then a fine scan (step 5 px) refines it within
    one coarse step of that position.  A global search is used because the
    target in a meme does not necessarily follow a kinematic trajectory.

    Args:
        image: 2-D grayscale search image.
        template: 2-D (float) template, no larger than ``image``.

    Returns:
        (best_x, best_y, best_ncc): top-left corner of the best window and
        its normalized cross-correlation score (-1.0 if no window fits).
    """
    t_h, t_w = template.shape
    i_h, i_w = image.shape
    area = t_h * t_w

    # Template statistics are loop-invariant; compute them once.
    t_mean = compute_mean(template)
    t_std = compute_stddev(template, t_mean)
    if t_std < 1e-10:  # avoid division by zero on flat templates
        t_std = 1e-10

    # Integral images give O(1) window sums for the mean/std of each region.
    image_float = image.astype(np.float64)
    int_image = cv2.integral(image_float)
    sq_int_image = cv2.integral(np.square(image_float))

    def get_sum(integral, x, y):
        """Sum over the t_h x t_w window with top-left corner (x, y)."""
        return integral[y + t_h, x + t_w] - integral[y, x + t_w] - integral[y + t_h, x] + integral[y, x]

    def score(x, y):
        """NCC of the template against the window at (x, y).

        Shared by both search stages (previously duplicated verbatim).
        """
        sum_r = get_sum(int_image, x, y)
        sum_sq = get_sum(sq_int_image, x, y)
        # max(..., 0) guards against tiny negative variance from rounding.
        r_std = np.sqrt(max((sum_sq - sum_r ** 2 / area) / area, 0))
        region = image_float[y:y + t_h, x:x + t_w]
        sum_tr = np.sum(template * region)
        numerator = sum_tr - t_mean * sum_r
        denominator = t_std * r_std * area
        return numerator / denominator if denominator != 0 else 0

    # Two-stage search parameters.
    big_step = 25
    small_step = 5

    best_ncc = -1.0
    best_x, best_y = 0, 0

    # Stage 1: coarse full-image scan.
    for y in range(0, i_h - t_h + 1, big_step):
        for x in range(0, i_w - t_w + 1, big_step):
            current_ncc = score(x, y)
            if current_ncc > best_ncc:
                best_ncc = current_ncc
                best_x, best_y = x, y

    # Stage 2: fine scan within one coarse step of the best coarse hit.
    y_start = max(best_y - big_step, 0)
    y_end = min(best_y + big_step, i_h - t_h)
    x_start = max(best_x - big_step, 0)
    x_end = min(best_x + big_step, i_w - t_w)

    for y in range(y_start, y_end + 1, small_step):
        for x in range(x_start, x_end + 1, small_step):
            current_ncc = score(x, y)
            if current_ncc > best_ncc:
                best_ncc = current_ncc
                best_x, best_y = x, y

    return best_x, best_y, best_ncc


def updata_trace_list(box_center, trace_list, max_list_len=50):
    """Append a box center to the trajectory, bounded to *max_list_len* points.

    Args:
        box_center: (x, y) center of the matched box for the current frame.
        trace_list: mutable list of past centers; modified in place.
        max_list_len: maximum number of points to retain.

    Returns:
        The same list, for caller convenience.
    """
    # Drop the oldest point before appending so the list never exceeds
    # max_list_len (the previous `<=` check allowed max_list_len + 1 entries).
    if len(trace_list) >= max_list_len:
        trace_list.pop(0)
    trace_list.append(box_center)
    return trace_list

def process_frame(image_rgb, templates, scales):
    """Match every template at every scale against one video frame.

    When the best NCC score exceeds a threshold, draws the match's bounding
    box and the recent center trajectory onto *image_rgb* in place.

    Args:
        image_rgb: BGR frame (annotated in place on a confident match).
        templates: list of grayscale float templates.
        scales: iterable of scale factors applied to each template.

    Returns:
        The (possibly annotated) frame.
    """
    # Tracking state lives in the module-level trace_list.
    global trace_list
    image_grey = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2GRAY)
    best_i = 0
    best_ncc = -1.0
    best_pos = (0, 0)
    best_template_size = (0, 0)
    best_scale = 1.0
    # Try every template...
    for i, template in enumerate(templates):
        # ...at every scale.
        for scale in scales:
            # Scaled template dimensions.
            new_w = int(round(template.shape[1] * scale))
            new_h = int(round(template.shape[0] * scale))
            # Skip degenerate sizes...
            if new_w < 1 or new_h < 1:
                continue
            # ...and templates larger than the frame.
            if new_h > image_grey.shape[0] or new_w > image_grey.shape[1]:
                continue
            # Anti-alias when shrinking, cubic interpolation when enlarging.
            interpolation = cv2.INTER_AREA if scale < 1.0 else cv2.INTER_CUBIC
            scaled_template = cv2.resize(template, (new_w, new_h), interpolation=interpolation)
            # Run the integral-image-accelerated NCC matcher.
            x, y, ncc = ncc_matching_optimized(image_grey, scaled_template)
            if ncc > best_ncc:
                best_ncc = ncc
                best_pos = (x, y)
                best_template_size = (new_w, new_h)
                best_scale = scale
                best_i = i

    if best_ncc > 0.4:  # confidence threshold; tune to the actual footage
        box_center = (int(best_pos[0]+best_template_size[0]/2), int(best_pos[1]+best_template_size[1]/2))
        trace_list = updata_trace_list(box_center, trace_list, 30)
        # Draw the trajectory as red segments between consecutive centers.
        for i, item in enumerate(trace_list):
            if i < 1:
                continue
            cv2.line(image_rgb,
                     (trace_list[i][0], trace_list[i][1]), (trace_list[i - 1][0], trace_list[i - 1][1]),
                     (0, 0, 255), 3)
        # Box outline thickness scales with the matched template scale.
        thickness = max(1, int(2 * best_scale))
        cv2.rectangle(image_rgb,
                      best_pos,
                      (best_pos[0] + best_template_size[0], best_pos[1] + best_template_size[1]),
                      (100, 100, 255), thickness)
        #
        # # Optional: annotate the match with its scale and NCC score.
        # text = f"Scale: {best_scale:.2f}, NCC: {best_ncc:.3f}"
        # cv2.putText(image_rgb, text,
        #             (best_pos[0], best_pos[1] - 10),
        #             cv2.FONT_HERSHEY_SIMPLEX, 0.5 * best_scale,
        #             (0, 255, 0), thickness=1)
    # # Optional: display the result interactively.
    # plt.figure(figsize=(10, 6))
    # plt.imshow(image_rgb)
    # plt.axis('off')
    # plt.show()
    return image_rgb


def generate_video(input_path='cartoon_data/cartoon.mp4',
                   templates=('cartoon_data/small_template_1.png',
                              'cartoon_data/small_template_2.png',
                              'cartoon_data/small_template_3.png')):
    """Run the tracker over a video and write an annotated copy.

    Args:
        input_path: path of the input video.
        templates: paths of the template images to track
            (tuple default avoids the mutable-default-argument pitfall).

    Side effects:
        Writes "out-<input filename>" to the working directory.
    """
    # Keep the parameter intact; load grayscale float templates separately.
    template_images = [load_image(t).astype(np.float64) for t in templates]
    filehead = input_path.split('/')[-1]
    output_path = "out-" + filehead
    print('视频开始处理', input_path)

    # Count frames by decoding once — CAP_PROP_FRAME_COUNT can be
    # unreliable for some containers, so an explicit pass is used.
    cap = cv2.VideoCapture(input_path)
    frame_count = 0
    while cap.isOpened():
        success, _ = cap.read()
        if not success:
            break
        # Count only frames that were actually decoded (the previous
        # version incremented before the check, over-counting by one).
        frame_count += 1
    cap.release()
    print('视频总帧数为', frame_count)

    cap = cv2.VideoCapture(input_path)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = cap.get(cv2.CAP_PROP_FPS)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    # Progress bar bound to the exact frame count.
    with tqdm(total=frame_count) as pbar:
        try:
            while cap.isOpened():
                success, frame = cap.read()
                if not success:
                    break

                # Best-effort per-frame processing: a failing frame is
                # written unannotated instead of aborting the whole video.
                try:
                    frame = process_frame(frame, template_images, scales=custom_scales)
                except Exception as error:
                    print('报错！', error)

                out.write(frame)
                pbar.update(1)
        except Exception:
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            print('中途中断')

    cv2.destroyAllWindows()
    out.release()
    cap.release()
    print('视频已保存', output_path)


# Script entry point: process the default demo video with the bundled templates.
if __name__ == "__main__":
    generate_video()