# Author: Zenos
# Created: 2025/3/10 10:07 AM
import argparse
import torch
from dataset import FontDataset
from tqdm import tqdm
import numpy as np
from sklearn.metrics import confusion_matrix
from target import calculate_mean_pixel_accuracy, calculate_miou,foreground_frequency_weighted_intersection_over_union
from train import custom_collate_fn
from utils.utils import mask_logits



def parse_args(argv=None):
    """Parse command-line arguments for evaluating the font-segmentation model.

    Args:
        argv: optional list of argument strings. ``None`` (the default) falls
            back to ``sys.argv[1:]``, preserving the original behavior; passing
            an explicit list makes the function testable without touching
            ``sys.argv``.

    Returns:
        argparse.Namespace with ``data_base_url``, ``batch_size``, ``device``
        and ``checkpoint`` attributes.
    """
    # NOTE: this script evaluates a trained checkpoint; the old description
    # wrongly said "Train" — fixed to reflect what the script actually does.
    parser = argparse.ArgumentParser(description="Evaluate the mynet model for font segmentation.")
    parser.add_argument('--data_base_url', type=str, default="/Users/zenos/Downloads/CCSSD/FZLBJW2017", help='Path to the dataset.')
    parser.add_argument('--batch_size', type=int, default=16, help='Batch size for training.')
    parser.add_argument('--device', type=str, default="cuda:0", help='Whether to use GPU or not. use cuda:0')
    parser.add_argument('--checkpoint', type=str, default="checkpoint/FiLM-ustroke-FZLBJW2017-200epochs.pt", help='Path to the trained model checkpoint.')

    return parser.parse_args(argv)


def predict(args, num_classes=35):
    """Evaluate a trained font-segmentation checkpoint on the test split.

    Runs inference over the entire test set, accumulates a pixel-level
    confusion matrix, and reports pixel accuracy, mIoU and foreground
    frequency-weighted IoU.

    Args:
        args: parsed command-line namespace (see ``parse_args``) providing
            ``data_base_url``, ``batch_size``, ``device`` and ``checkpoint``.
        num_classes: number of segmentation classes (default 35, matching the
            original hard-coded label space).
    """
    # Unpack command-line arguments.
    DATA_BASE_URL = args.data_base_url
    BATCH_SIZE = args.batch_size
    DEVICE = args.device
    CHECKPOINT_PATH = args.checkpoint

    # Fall back to CPU when CUDA is unavailable.
    if not torch.cuda.is_available():
        DEVICE = "cpu"
        print("GPU不可用，使用 CPU 进行推理")

    # Echo the effective evaluation parameters.
    print("测试参数：")
    print(f"数据集路径 (data_base_url): {DATA_BASE_URL}")
    print(f"批次大小 (batch_size): {BATCH_SIZE}")
    print(f"设备类型 (device): {DEVICE}")
    print(f"加载的模型权重: {CHECKPOINT_PATH}")

    TestDataset = FontDataset(False, DATA_BASE_URL)
    # FIX: evaluation must see every sample exactly once. The previous
    # shuffle=True, drop_last=True silently discarded up to BATCH_SIZE-1
    # test images per epoch, skewing all reported metrics.
    test_iter = torch.utils.data.DataLoader(TestDataset, BATCH_SIZE, shuffle=False, drop_last=False,
                                             collate_fn=custom_collate_fn)
    print("1. 测试集加载成功，共 {} 张图片".format(len(TestDataset)))

    # NOTE(security): weights_only=False unpickles arbitrary objects — only
    # load checkpoints from a trusted source.
    net = torch.load(CHECKPOINT_PATH, map_location=DEVICE, weights_only=False)
    net.to(DEVICE)
    net.eval()  # switch to inference mode (disables dropout/batch-norm updates)
    print("3.开始推理...")
    # Accumulate a confusion matrix over all pixels of all batches.
    conf_matrix = np.zeros((num_classes, num_classes), dtype=int)
    with torch.no_grad():  # no gradients needed — saves memory during inference
        for X, Y, C in tqdm(test_iter, desc="Evaluating"):
            # Move inputs to the same device as the model.
            X, Y, C = X.to(DEVICE), Y.to(DEVICE), C.to(DEVICE)
            Y_pred = net(X, C)
            # Y_pred = mask_logits(Y_pred, C)
            Y_pred = Y_pred.argmax(dim=1)  # per-pixel predicted class index
            # Flatten ground truth and predictions for sklearn's confusion_matrix.
            true_labels = Y.view(-1).cpu().numpy()
            pred_labels = Y_pred.view(-1).cpu().numpy()
            batch_conf_matrix = confusion_matrix(true_labels, pred_labels, labels=np.arange(num_classes))
            conf_matrix += batch_conf_matrix  # accumulate into the global matrix

    # Pixel accuracy (overall and per class).
    pixel_accuracy, class_pixel_accuracy = calculate_mean_pixel_accuracy(conf_matrix)
    print(f"Pixel Accuracy: {pixel_accuracy:.4f}")
    for i, acc in enumerate(class_pixel_accuracy):
        print(f"Category {i}: Accuracy = {acc}")
    # Mean intersection-over-union.
    miou, iou_per_class = calculate_miou(conf_matrix)
    print(f"MIOU: {miou:.4f}, IoU_per_class: {iou_per_class}")
    # Foreground frequency-weighted IoU.
    ffw_iou = foreground_frequency_weighted_intersection_over_union(conf_matrix)
    print(f"FFW-IOU: {ffw_iou:.4f}")

if __name__ == "__main__":
    # Script entry point: parse CLI arguments and run the evaluation.
    predict(parse_args())
