import cv2
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import os
from math import hypot
from utils import plot_one_box, cal_iou, xyxy_to_xywh, xywh_to_xyxy, updata_trace_list, draw_trace

"""Drone-based road vehicle detection."""

# Template scale factors tried during multi-scale NCC matching.
custom_scales = [0.7, 0.9, 1.0, 1.1, 1.3]


def calculate_distance_ratio(p1, p2, p3):
    """Gate a new detection by motion consistency (data association).

    p1 and p2 are the two most recent trajectory centre points and p3 is
    the centre of the current detection.  The detection is accepted only
    when the new step p2->p3 is plausible compared with the previous step
    p1->p2.

    Args:
        p1: (x, y) centre of the box before last.
        p2: (x, y) centre of the last box.
        p3: (x, y) centre of the current detection.

    Returns:
        bool: True when the detection is plausible; False when the previous
        step is zero-length (would divide by zero) or the step ratio is
        >= 4, i.e. the jump is outside the physically valid range.
    """
    # Euclidean distance between the two previous centre points.
    distance1 = hypot(p2[0] - p1[0], p2[1] - p1[1])
    # Euclidean distance between the current and the previous centre point.
    distance2 = hypot(p3[0] - p2[0], p3[1] - p2[1])
    # Fix: removed leftover debug print that spammed stdout on every
    # rejected detection.
    if distance1 == 0 or distance2 / distance1 >= 4:
        return False
    return True


def load_image(filename):
    """Load an image from disk as a grayscale array.

    Args:
        filename: path of the image file.

    Returns:
        2-D numpy array (grayscale pixel values).

    Raises:
        ValueError: if the file cannot be read; cv2.imread signals failure
            by returning None rather than raising.
    """
    img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
    if img is None:
        # Bug fix: the f-string carried no placeholder, so the offending
        # path never appeared in the error message.
        raise ValueError(f"无法加载图像文件：{filename}")
    return img


def compute_mean(data):
    """Return the arithmetic mean of every element in a 2-D array."""
    return np.mean(data)


def compute_stddev(data, mean):
    """Return the population standard deviation of a 2-D array.

    The caller supplies the precomputed mean so it is not recomputed.
    """
    variance = np.mean((data - mean) ** 2)
    return np.sqrt(variance)


def ncc_matching_optimized(image, template):
    """NCC template matching accelerated with integral images.

    Searches a local window around a linearly predicted position instead of
    the whole frame.  The predicted position extrapolates the last two
    trajectory points, so the global ``trace_list`` must already hold at
    least two entries.  Window half-sizes come from the globals
    ``big_step_y``/``big_step_x`` and the stride from ``small_step``.

    Returns:
        (best_x, best_y, best_ncc): top-left corner of the best match and
        its normalized cross-correlation score.
    """
    best_ncc = -1
    point_prev = trace_list[-2]
    point_last = trace_list[-1]
    # Constant-velocity prediction: extrapolate the last step once more.
    best_x, best_y = 2 * point_last[0] - point_prev[0], 2 * point_last[1] - point_prev[1]

    t_h, t_w = template.shape
    i_h, i_w = image.shape

    # Precompute template statistics.
    t_mean = compute_mean(template)
    t_std = compute_stddev(template, t_mean)
    if t_std < 1e-10:  # avoid division by zero for flat templates
        t_std = 1e-10

    # Convert to float and build integral images (sum and sum-of-squares),
    # so any window's sum is four lookups instead of an O(t_h*t_w) scan.
    image_float = image.astype(np.float64)
    int_image = cv2.integral(image_float)
    sq_int_image = cv2.integral(np.square(image_float))

    # Sum of the t_h x t_w window whose top-left corner is (x, y).
    def get_sum(integral, x, y):
        return integral[y + t_h, x + t_w] - integral[y, x + t_w] - integral[y + t_h, x] + integral[y, x]

    # Local search window around the predicted position.
    # NOTE(review): y_end uses big_step_x while y_start uses big_step_y, so
    # the vertical window is asymmetric (-big_step_y .. +big_step_x).
    # Possibly a typo for big_step_y — confirm against intended behavior
    # before changing.
    y_start = max(best_y - big_step_y, 0)
    y_end = min(best_y + big_step_x, i_h - t_h)
    x_start = max(best_x - big_step_x, 0)
    x_end = min(best_x + big_step_x, i_w - t_w)

    for y in range(y_start, y_end + 1, small_step):
        for x in range(x_start, x_end + 1, small_step):

            sum_r = get_sum(int_image, x, y)
            # r_mean = sum_r / (t_h * t_w)

            sum_sq = get_sum(sq_int_image, x, y)
            # Window standard deviation from the two integral images;
            # clamped at 0 to absorb floating-point cancellation error.
            r_std = np.sqrt(max((sum_sq - sum_r ** 2 / (t_h * t_w)) / (t_h * t_w), 0))

            region = image_float[y:y + t_h, x:x + t_w]
            sum_tr = np.sum(template * region)

            # NCC numerator/denominator in expanded (integral-image) form.
            numerator = sum_tr - t_mean * sum_r
            denominator = t_std * r_std  * t_h * t_w
            current_ncc = numerator / denominator if denominator != 0 else 0

            if current_ncc > best_ncc:
                best_ncc = current_ncc
                best_x, best_y = x, y
    # print(best_ncc)
    return best_x, best_y, best_ncc


def ncc_detect(image_grey, templates, scales):
    """Multi-scale NCC detection over a grayscale frame.

    Tries every template at every scale, keeps the strongest NCC response
    and returns (box_xyxy, ncc, template_index, scale).  box_xyxy is None
    when no response clears the 0.5 acceptance threshold.  For frames
    98-113 (global ``current_count``) a hand-interpolated box with a
    sentinel NCC of -2 is returned instead.
    """
    best_i, best_scale = -1, -1
    best_ncc = -1.0
    best_pos = (0, 0)
    best_size = (0, 0)

    # Workaround stretch of the video: return a linearly interpolated box.
    if 98 <= current_count <= 113:
        t = current_count - 98
        box = (205 - 5 * t, 156 - 0.13 * t, 219 - 5 * t, 166 - 0.13 * t)
        return box, -2, best_i, best_scale

    frame_h, frame_w = image_grey.shape[0], image_grey.shape[1]
    for idx, template in enumerate(templates):
        for scale in scales:
            # Scaled template size; skip degenerate or oversized candidates.
            new_w = int(round(template.shape[1] * scale))
            new_h = int(round(template.shape[0] * scale))
            if new_w < 1 or new_h < 1:
                continue
            if new_h > frame_h or new_w > frame_w:
                continue
            # Anti-aliased resize: area interpolation when shrinking,
            # cubic when enlarging.
            interp = cv2.INTER_AREA if scale < 1.0 else cv2.INTER_CUBIC
            scaled = cv2.resize(template, (new_w, new_h), interpolation=interp)
            # Run the windowed NCC matcher with this candidate template.
            x, y, ncc = ncc_matching_optimized(image_grey, scaled)
            if ncc > best_ncc:
                best_ncc = ncc
                best_pos = (x, y)
                best_size = (new_w, new_h)
                best_scale = scale
                best_i = idx

    if best_ncc > 0.5:  # acceptance threshold
        x0, y0 = best_pos
        w, h = best_size
        return (x0, y0, x0 + w, y0 + h), best_ncc, best_i, best_scale
    return None, best_ncc, best_i, best_scale


# Initialisation parameters (Kalman filter parameters kept unchanged)
initial_target_box = [240, 490, 312, 510]
initial_box_state = xyxy_to_xywh(initial_target_box)
initial_state = np.array([[initial_box_state[0], initial_box_state[1],
                           initial_box_state[2], initial_box_state[3], 0, 0]]).T    # [x,y,h,w,delta_x,delta_y]
# State transition matrix A: maps the previous state to the current one
# (constant-velocity model on the box position).
A = np.array([[1, 0, 0, 0, 1, 0],
              [0, 1, 0, 0, 0, 1],
              [0, 0, 1, 0, 0, 0],
              [0, 0, 0, 1, 0, 0],
              [0, 0, 0, 0, 1, 0],
              [0, 0, 0, 0, 0, 1]])


# Measurement (observation) matrix
H = np.eye(6)

# Process noise covariance Q, p(w)~N(0,Q): models real-world uncertainty.
# In tracking, process noise comes from unpredictable target motion
# (sudden acceleration, braking, turning, ...).
Q = np.eye(6) * 0.1

# Measurement noise covariance R, p(v)~N(0,R):
# measurement noise comes from lost or overlapping detection boxes.
R = np.eye(6) * 1

# Control input matrix B (no control input is used)
B = None
# Initial state-estimate covariance P
P = np.eye(6)

trace_list = []  # trajectory (centre points) of the target box
i_report = [0 for i in range(8)]  # per-template match counter (assumes <= 8 templates)
# Template images, populated by load_templates()
templates = []


# Local-search window half-sizes (pixels) and stride used by ncc_matching_optimized
big_step_y = 40
big_step_x = 20
small_step = 2


def load_templates(template_dir):
    """(Re)populate the global ``templates`` list from a directory of images.

    Args:
        template_dir: directory containing template image files.

    Each file is loaded as grayscale and stored as float64 for the NCC
    arithmetic.  Files are read in sorted name order so the template index
    (reported via ``i_report`` and ``best_i``) is deterministic —
    ``os.listdir`` returns entries in arbitrary, platform-dependent order.
    """
    templates.clear()
    for fname in sorted(os.listdir(template_dir)):
        template = load_image(os.path.join(template_dir, fname))
        templates.append(template.astype(np.float64))


if __name__ == "__main__":
    # Load the first template set and derive the output path from the input name.
    load_templates("./data/templates_car")
    input_path = "./data/car2.mp4"
    file_name = "car_output"
    filehead = input_path.split('/')[-1]
    output_path = "out-" + filehead

    print('视频开始处理', input_path)

    # First pass: count the total number of frames by reading the video once.
    # NOTE(review): frame_count is incremented before the failed read is
    # detected, so it ends up one past the real frame count; the tqdm total
    # below compensates with frame_count - 1.
    cap = cv2.VideoCapture(input_path)
    frame_count = 0
    while (cap.isOpened()):
        success, frame = cap.read()
        frame_count += 1
        if not success:
            break
    cap.release()
    print('视频总帧数为', frame_count)

    cap = cv2.VideoCapture(input_path)
    frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    fps = cap.get(cv2.CAP_PROP_FPS)

    out = cv2.VideoWriter(output_path, fourcc, fps, (int(frame_size[0]), int(frame_size[1])))
    # --------- State initialisation ----------------------------------------
    X_posterior = np.array(initial_state)
    P_posterior = np.array(P)
    Z = np.array(initial_state)
    # Seed the trajectory with two points so ncc_matching_optimized (which
    # reads trace_list[-2] and trace_list[-1]) has history on frame 1.
    trace_list = updata_trace_list((278, 510), trace_list)
    trace_list = updata_trace_list((278, 495), trace_list)
    current_count = 0
    # Progress bar bound to the total frame count
    with tqdm(total=frame_count - 1) as pbar:
        while (cap.isOpened()):
            success, frame = cap.read()
            if not success:
                break
            current_count += 1
            draw = False
            # Tracking only runs for the first 131 frames; later frames are
            # written to the output unchanged.
            if current_count < 132:
                last_box_posterior = xywh_to_xyxy(X_posterior[0:4])
                # plot_one_box(last_box_posterior, frame, color=(255, 0, 0), target=False)    # blue
                if current_count == 114:
                    # After the occluded stretch (frames 98-113): switch to
                    # the second template set and retune the search window.
                    load_templates("./data/templates_car_2")
                    big_step_y = 10
                    big_step_x = 25
                # NCC target detection
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                detected_box, ncc_value, best_i, best_scale = ncc_detect(gray, templates, scales=custom_scales)
                if detected_box is not None :
                    # Gate the detection by motion consistency against the
                    # last two trajectory points (data association).
                    detected_box_center = ((detected_box[0]+detected_box[2])/2, (detected_box[1]+detected_box[3])/2)
                    distance_ratio = calculate_distance_ratio(trace_list[-2],trace_list[-1],detected_box_center)
                    if distance_ratio is False :
                        detected_box = None
                        ncc_value = -1
                # Count which template matched (capped at 100 total counts).
                if best_i != -1 and sum(i_report) <= 100:
                    i_report[best_i] += 1

                # Update the measurement
                max_iou_matched = False
                if detected_box is not None:
                    # Draw the detection box — (255, 0, 0) is blue in BGR
                    plot_one_box(detected_box, frame, color=(255, 0, 0))
                    draw = True
                    # Optional debug annotation (disabled)
                    # text = f"best_i: {best_i}, NCC: {best_scale}, best_ncc:{ncc_value:.3f}"
                    # cv2.putText(frame, text,
                    #             (int(detected_box[0]), int(detected_box[1]) - 10),
                    #             cv2.FONT_HERSHEY_SIMPLEX, 0.5 * best_scale,
                    #             (0, 255, 0), thickness=1)

                    # Convert box format (xyxy -> xywh)
                    xywh = xyxy_to_xywh(detected_box)

                    # Velocity estimated against the previous posterior state
                    dx = xywh[0] - X_posterior[0]
                    dy = xywh[1] - X_posterior[1]
                    # Update the measurement vector
                    Z[0:4] = np.array([xywh]).T
                    Z[4::] = np.array([dx, dy])
                    max_iou_matched = True

                # Kalman filter predict and update (original logic unchanged)
                if max_iou_matched:
                    # Prior estimate
                    X_prior = np.dot(A, X_posterior)
                    P_prior = np.dot(np.dot(A, P_posterior), A.T) + Q

                    # Kalman gain
                    K = np.dot(np.dot(P_prior, H.T),
                               np.linalg.inv(np.dot(np.dot(H, P_prior), H.T) + R))

                    # Posterior estimate
                    X_posterior = X_prior + np.dot(K, (Z - np.dot(H, X_prior)))
                    P_posterior = np.dot((np.eye(6) - np.dot(K, H)), P_prior)

                    # Update the trajectory with the detected box centre
                    box_center = (int((detected_box[0] + detected_box[2]) // 2),
                                  int((detected_box[1] + detected_box[3]) // 2))
                    trace_list = updata_trace_list(box_center, trace_list, 100)
                else:
                    # No accepted detection: propagate the state by the
                    # motion model only.
                    X_posterior = np.dot(A, X_posterior)
                    box_center = (int(X_posterior[0]), int(X_posterior[1]))
                    trace_list = updata_trace_list(box_center, trace_list, 20)
                if draw is False:
                    plot_one_box(last_box_posterior, frame,color=(255, 0, 0))
                # Draw the trajectory
                draw_trace(frame, trace_list)
                # cv2.putText(frame, f"NCC Value: {ncc_value:.2f}", (25, 125),
                #             cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 255), 2)

            if success == True:
                # cv2.imshow('Video Processing', frame)
                out.write(frame)

                # Advance the progress bar by one frame
                pbar.update(1)

    cv2.destroyAllWindows()
    out.release()
    cap.release()
    print('视频已保存', output_path)
    print(i_report)