# ==============================================================================
# Copyright (c) 2025 CompanyName. All rights reserved.
# Author:         22020873 陈泽欣
# Project:        Design of Deep Learning Fundamental Course
# Module:         visualization.py
# Date:           2025-05-24
# Description:    本模块实现了深度学习模型输出结果的可视化功能，主要包括：
#                 - PnP 和 Bundle Adjustment（BA）姿态估计结果的投影绘制；
#                 - PoseNet 模型预测结果的融合与显示；
#                 - YOLO 检测框和分类信息的图像标注；
#                 - 多种算法结果的重投影误差计算与动态筛选；
#                 - 图像上文本信息的绘制支持。
#                 该模块是整个骰子姿态估计系统的核心可视化组件，适用于实时检测与离线分析场景。
# ==============================================================================
import cv2
import numpy as np

from core.ba_solver import bundle_adjustment
from configs.norm_data import dice_3d_points, camera_matrix, dist_coefs
from core.pnp_solver import solve_pnp, calculate_reprojection_error
from core.posenet_detect import predict_rvec
from configs.config import Config
from utils.utils import match_points_by_hungarian, rvec_to_euler


def weighted_fusion(
    corners_list, rvec_list, tvec_list,
    ba_rvecs, ba_tvecs, posenet_rvecs
):
    """Fuse PnP, BA and PoseNet projections into one weighted 2-D point set.

    For every detected die the three pose estimates are reprojected, each
    projection is weighted by the inverse of its reprojection error, and the
    point sets (aligned to the first frame's PnP projection via Hungarian
    matching) are accumulated into a single weighted average.

    Args:
        corners_list:  per-die 2-D corner observations.
        rvec_list / tvec_list:    per-die PnP rotation / translation vectors.
        ba_rvecs / ba_tvecs:      per-die bundle-adjusted pose vectors.
        posenet_rvecs:            per-die PoseNet-predicted rotation vectors
                                  (translation is taken from PnP, since PoseNet
                                  only predicts rotation).

    Returns:
        (final_imgpts, fusion_error):
            final_imgpts — fused projected points, shape (num_points, 1, 2),
                           or an empty array when no frame passed the filter;
            fusion_error — smallest running reprojection error of the fused
                           estimate (float; ``inf`` when no frame was fused).
    """
    num_points = len(dice_3d_points)
    final_imgpts = np.zeros((num_points, 1, 2), dtype=np.float64)
    total_weight = 0.0

    # Running reprojection errors of the partially-fused estimate, one entry
    # per accepted frame.
    final_errors = []

    # Reference projection (first frame's PnP result) used to align the point
    # ordering of all other projections.
    ref_imgpts_pnp, _ = cv2.projectPoints(
        dice_3d_points, rvec_list[0], tvec_list[0], camera_matrix, dist_coefs
    )
    ref_imgpts_pnp = ref_imgpts_pnp.reshape(-1, 2)

    for i, points_2d in enumerate(corners_list):
        # PnP projection and its reprojection error.
        imgpts_pnp, _ = cv2.projectPoints(
            dice_3d_points, rvec_list[i], tvec_list[i], camera_matrix, dist_coefs
        )
        imgpts_pnp = imgpts_pnp.reshape(-1, 2)
        error_pnp = calculate_reprojection_error(points_2d, imgpts_pnp)

        # Bundle-adjusted projection.
        imgpts_ba, _ = cv2.projectPoints(
            dice_3d_points, ba_rvecs[i], ba_tvecs[i], camera_matrix, dist_coefs
        )
        imgpts_ba = imgpts_ba.reshape(-1, 2)
        error_ba = calculate_reprojection_error(points_2d, imgpts_ba)

        # PoseNet projection (PoseNet rotation combined with PnP translation).
        imgpts_posenet, _ = cv2.projectPoints(
            dice_3d_points, posenet_rvecs[i], tvec_list[i], camera_matrix, dist_coefs
        )
        imgpts_posenet = imgpts_posenet.reshape(-1, 2)
        error_posenet = calculate_reprojection_error(points_2d, imgpts_posenet)

        # Skip frames where the three methods disagree strongly (> 5 px gap
        # between PnP and either alternative) — likely an outlier estimate.
        if abs(error_pnp - error_ba) > 5 or abs(error_pnp - error_posenet) > 5:
            continue

        # Align each projection's point ordering to the reference projection.
        aligned_pnp = match_points_by_hungarian(ref_imgpts_pnp, imgpts_pnp)
        aligned_ba = match_points_by_hungarian(ref_imgpts_pnp, imgpts_ba)
        aligned_posenet = match_points_by_hungarian(ref_imgpts_pnp, imgpts_posenet)

        # Inverse-error weighting: a smaller reprojection error means a more
        # reliable estimate (epsilon avoids division by zero).
        weight_pnp = 1.0 / (error_pnp + 1e-8)
        weight_ba = 1.0 / (error_ba + 1e-8)
        weight_posenet = 1.0 / (error_posenet + 1e-8)

        total_weight += weight_pnp + weight_ba + weight_posenet

        # Vectorized weighted accumulation (replaces the per-point loop;
        # numerically identical).
        final_imgpts[:, 0, :] += (
            weight_pnp * aligned_pnp
            + weight_ba * aligned_ba
            + weight_posenet * aligned_posenet
        )

        # BUGFIX: reshape to (N, 2) to match every other call to
        # calculate_reprojection_error in this module — passing (N, 1, 2)
        # risks an unintended broadcast against the (N, 2) observations.
        final_errors.append(
            calculate_reprojection_error(
                points_2d, (final_imgpts / total_weight).reshape(-1, 2)
            )
        )

    # No frame survived the consistency filter: nothing to fuse.
    if total_weight < 1e-6:
        # BUGFIX: return a float error (inf) instead of an empty list so the
        # return type is consistent; callers comparing the error against a
        # threshold no longer risk a TypeError.
        return np.array([]), float("inf")

    # Normalize the weighted sum into the final fused projection.
    final_imgpts /= total_weight

    return final_imgpts, min(final_errors)

def compute_pnp_results(corners_list):
    """Run PnP for every corner set and collect pose + projection results.

    Args:
        corners_list: iterable of per-die 2-D corner observations.

    Returns:
        (rvec_list, tvec_list, pnp_imgpts_list, pnp_errors) — per-die rotation
        vectors, translation vectors, reprojected 2-D points (N, 2), and
        reprojection errors.
    """
    rvec_list, tvec_list = [], []
    pnp_imgpts_list, pnp_errors = [], []

    for corners in corners_list:
        rvec, tvec = solve_pnp(corners)

        # Reproject the model points with the recovered pose.
        projected, _ = cv2.projectPoints(
            dice_3d_points, rvec, tvec, camera_matrix, dist_coefs
        )
        projected = projected.reshape(-1, 2)

        rvec_list.append(rvec)
        tvec_list.append(tvec)
        pnp_imgpts_list.append(projected)
        pnp_errors.append(calculate_reprojection_error(corners, projected))

    return rvec_list, tvec_list, pnp_imgpts_list, pnp_errors


def compute_ba_results(corners_list, rvec_list, tvec_list, huber_loss):
    """Refine poses with bundle adjustment and reproject the results.

    Args:
        corners_list: per-die 2-D corner observations.
        rvec_list / tvec_list: initial (PnP) pose vectors.
        huber_loss: robust-loss parameter forwarded to bundle_adjustment.

    Returns:
        (ba_rvecs, ba_tvecs, ba_imgpts_list, ba_errors) — refined poses,
        their projected 2-D points (N, 2), and reprojection errors.
    """
    ba_rvecs, ba_tvecs = bundle_adjustment(corners_list, rvec_list, tvec_list, huber_loss)

    ba_imgpts_list, ba_errors = [], []

    for corners, rvec, tvec in zip(corners_list, ba_rvecs, ba_tvecs):
        projected, _ = cv2.projectPoints(
            dice_3d_points, rvec, tvec, camera_matrix, dist_coefs
        )
        projected = projected.reshape(-1, 2)

        ba_imgpts_list.append(projected)
        ba_errors.append(calculate_reprojection_error(corners, projected))

    return ba_rvecs, ba_tvecs, ba_imgpts_list, ba_errors


def compute_posenet_results(corners_list, mask_list, tvec_list):
    """Predict rotations with PoseNet and reproject them for each die.

    PoseNet predicts only the rotation vector; the translation is taken from
    the corresponding PnP result (tvec_list).

    Args:
        corners_list: per-die 2-D corner observations.
        mask_list: per-die mask images fed to the PoseNet model.
        tvec_list: per-die PnP translation vectors.

    Returns:
        (posenet_rvecs, posenet_imgpts_list, posenet_errors).
    """
    posenet_rvecs, posenet_imgpts_list, posenet_errors = [], [], []

    for corners, mask, tvec in zip(corners_list, mask_list, tvec_list):
        rvec = predict_rvec(Config.PoseNet_MODEL, mask, Config.DEVICE)

        projected, _ = cv2.projectPoints(
            dice_3d_points, rvec, tvec, camera_matrix, dist_coefs
        )
        projected = projected.reshape(-1, 2)

        posenet_rvecs.append(rvec)
        posenet_imgpts_list.append(projected)
        posenet_errors.append(calculate_reprojection_error(corners, projected))

    return posenet_rvecs, posenet_imgpts_list, posenet_errors


def compute_fusion_result(corners_list, rvec_list, tvec_list, ba_rvecs, ba_tvecs, posenet_rvecs):
    """Thin wrapper: fuse the PnP, BA, and PoseNet results via weighted_fusion.

    Returns:
        (fusion_imgpts, fusion_error) exactly as produced by weighted_fusion.
    """
    return weighted_fusion(
        corners_list, rvec_list, tvec_list, ba_rvecs, ba_tvecs, posenet_rvecs
    )


def reproject_and_draw(img, imgpts, color=(100, 255, 100), line_thickness=10):
    """Draw a projected cuboid wireframe on the image.

    Assumes imgpts holds 8 projected vertices: indices 0-3 are one face and
    4-7 the opposite face — TODO confirm ordering against the 3-D model.
    Returns the annotated image.
    """
    pts = np.int32(imgpts).reshape(-1, 2)

    # Face made of vertices 4..7, drawn at a fixed thin thickness of 3
    # (intentionally thinner than the rest — confirm with original author).
    img = cv2.drawContours(img, [pts[4:]], -1, color, 3)

    # Connect each vertex i of the first face to vertex i+4 of the other.
    for base_pt, opposite_pt in zip(pts[:4], pts[4:8]):
        img = cv2.line(img, tuple(base_pt), tuple(opposite_pt), color, line_thickness)

    # Face made of vertices 0..3.
    img = cv2.drawContours(img, [pts[:4]], 0, color, line_thickness)
    return img


def draw_projections(draw_img, pnp_imgpts_list, ba_imgpts_list, posenet_imgpts_list, fusion_imgpts,
                     display_options, control_params, pnp_errors, ba_errors, posenet_errors, fusion_error):
    """Overlay every accepted projection on the image and collect label texts.

    A projection is drawn only when its method is enabled in display_options
    and its reprojection error is below the corresponding threshold from
    control_params.

    Returns:
        List of short "Method: error" strings for the drawn projections.
    """
    texts = []
    thickness = control_params['normal_line_thickness']

    for idx in range(len(pnp_imgpts_list)):
        # Optional console dump of all three per-die errors.
        if Config.OUT_ERROR_FLAG:
            print("pnp_repro_errors: {:.4f}".format(pnp_errors[idx]))
            print("ba_repro_error: {:.4f}".format(ba_errors[idx]))
            print("posenet_repro_error: {:.4f}".format(posenet_errors[idx]))

        if display_options['show_pnp'] and pnp_errors[idx] < control_params['normal_repro_err']:
            reproject_and_draw(draw_img, pnp_imgpts_list[idx],
                               color=control_params['pnp_frame_color'], line_thickness=thickness)
            texts.append(f"PnP: {pnp_errors[idx]:.2f}")

        if display_options['show_ba'] and ba_errors[idx] < control_params['normal_repro_err']:
            reproject_and_draw(draw_img, ba_imgpts_list[idx],
                               color=control_params['ba_frame_color'], line_thickness=thickness)
            texts.append(f"BA: {ba_errors[idx]:.2f}")

        if display_options['show_posenet'] and posenet_errors[idx] < control_params['posenet_repro_err']:
            reproject_and_draw(draw_img, posenet_imgpts_list[idx],
                               color=control_params['posenet_frame_color'], line_thickness=thickness)
            texts.append(f"PoseNet: {posenet_errors[idx]:.2f}")

    # Fusion result is a single (possibly empty) projection for the frame.
    if fusion_imgpts.size != 0 and fusion_error < control_params['fusion_repro_err'] and display_options['show_fusion']:
        reproject_and_draw(draw_img, fusion_imgpts,
                           color=control_params['fusion_frame_color'], line_thickness=thickness)
        texts.append(f"Fusion: {fusion_error:.2f}")
        if Config.OUT_ERROR_FLAG:
            print("fusion_repro_error: {:.4f}".format(fusion_error), end="\n" * 2)

    return texts


def draw_error_text(draw_img, texts, font_scale, font_thickness):
    """Render the error label lines down from the top-left of the image.

    Colors come from draw_color_list starting at offset 4 — presumably the
    first slots are reserved for other overlays; confirm against
    configs.norm_data.
    """
    anchor_x = Config.TOP_X
    for idx, text in enumerate(texts):
        cv2.putText(
            draw_img,
            text,
            (anchor_x, Config.TOP_Y + idx * Config.TEXT_LINE_HEIGHT),
            Config.TEXT_FONT,
            font_scale,
            draw_color_list[idx + 4],
            font_thickness,
        )

def display_pose_info(best_method, best_rvec, best_tvec, min_error):
    """Log the best pose estimate to the console and return its text lines.

    Args:
        best_method: name of the winning method (e.g. "pnp", "ba").
        best_rvec / best_tvec: winning rotation / translation vectors.
        min_error: the winning reprojection error in pixels.

    Returns:
        Four formatted strings (method+error, tvec, rvec, euler) for drawing.
    """
    # Convert to Euler angles before flattening (rvec_to_euler takes the
    # original vector form).
    euler = np.asarray(rvec_to_euler(best_rvec)).flatten()
    tvec = np.asarray(best_tvec).flatten()
    rvec = np.asarray(best_rvec).flatten()

    tvec_line = f"tvec: [{tvec[0]:.2f}, {tvec[1]:.2f}, {tvec[2]:.2f}]"
    rvec_line = f"rvec: [{rvec[0]:.2f}, {rvec[1]:.2f}, {rvec[2]:.2f}]"
    euler_line = f"euler: [{euler[0]:.2f}, {euler[1]:.2f}, {euler[2]:.2f}]"

    if Config.OUT_BEST_POSE_FLAG:
        print("\n========================= 最佳姿态估计 =========================")
        print(f"Method: {best_method.upper()}")
        print(tvec_line)
        print(rvec_line)
        print(euler_line)
        print("================================================================\n")

    return [
        f"Method ({best_method}): {min_error:.2f} px",
        tvec_line,
        rvec_line,
        euler_line,
    ]

def draw_pose_text(image, texts, font_scale, font_thickness):
    """Render pose summary lines anchored to the bottom-left of the image."""
    # Vertical step per line and the baseline so the whole block fits above
    # the bottom edge.
    line_step = Config.TEXT_LINE_HEIGHT * Config.TEXT_LINE_HEIGHT_FACTOR
    base_y = image.shape[0] - len(texts) * line_step

    scaled_font = font_scale * Config.TEXT_FONT_SCALE_FACTOR
    scaled_thickness = int(font_thickness * Config.TEXT_FONT_SCALE_FACTOR)

    for idx, text in enumerate(texts):
        cv2.putText(
            image,
            text,
            (Config.BOTTOM_X, int(base_y + idx * line_step)),
            Config.TEXT_FONT,
            scaled_font,
            # Colors start at offset 8 — presumably later slots of the shared
            # palette; confirm against configs.norm_data.
            draw_color_list[idx + 8],
            scaled_thickness,
        )


from configs.norm_data import labels_dict, draw_color_list
def draw_yolo_datas(draw_img, yolo_datas, yolo_font_scale, line_thickness):
    """Draw YOLO detection boxes with class labels and confidence scores.

    Each entry of yolo_datas is (cls_idx, conf, x1, y1, x2, y2) in image
    coordinates; the box color is looked up per class in draw_color_list.
    """
    for cls_idx, conf, x1, y1, x2, y2 in yolo_datas:
        color = draw_color_list[cls_idx]

        cv2.rectangle(draw_img, (x1, y1), (x2, y2), color, line_thickness)
        # Class label above the top-left corner, confidence above the top-right.
        cv2.putText(draw_img, f"{cls_idx + 1}{labels_dict[cls_idx]}",
                    (x1, y1 - 10), Config.YOLO_FONT, yolo_font_scale, color, line_thickness)
        cv2.putText(draw_img, f"{conf:.2f}",
                    (x2, y1 - 10), Config.YOLO_FONT, yolo_font_scale, color, line_thickness)