# Author: Zenos
# Created: 2025/2/27 4:06 PM
import os
import argparse
from dataset import FontDataset
import torch
import torch.nn as nn
from models.ustroke import UStroke
from models.mynet import mynet
from models.mynet2 import mynet2
from models.mynet3 import mynet3
from models.mynet4 import mynet4
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
from utils.utils import custom_collate_fn
from utils.utils import mask_logits


def parse_args(argv=None):
    """Parse command-line arguments for training.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case argparse reads ``sys.argv[1:]`` as before; passing an
            explicit list makes this function testable without touching the
            real command line.

    Returns:
        argparse.Namespace holding the training configuration.
    """
    parser = argparse.ArgumentParser(description="Train the mynet model for font segmentation.")
    # Command-line arguments.
    parser.add_argument('--data_base_url', type=str, default="/Users/zenos/Downloads/CCSSD/FZLBJW2017",
                        help='Path to the dataset.')
    parser.add_argument('--batch_size', type=int, default=1, help='Batch size for training.')
    parser.add_argument('--epochs', type=int, default=200, help='Number of epochs to train.')
    parser.add_argument('--device', type=str, default="cuda:0", help='Whether to use GPU or not. use cuda:0')
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='Learning rate for optimizer.')
    parser.add_argument('--model', type=str, default="mynet4", help='model for training')

    return parser.parse_args(argv)


def dice_loss(pred, target, num_classes=35, smooth=1e-6):
    """Multi-class Dice loss averaged over the classes that actually appear.

    For every label present in either ``pred`` or ``target``, a per-class
    Dice coefficient is computed from the binary masks of that class; the
    loss is ``1 - mean(dice)`` over those present classes only.

    Args:
        pred: Integer tensor of predicted class labels.
        target: Integer tensor of ground-truth class labels (same shape).
        num_classes: Size of the per-class coefficient buffer; class ids are
            assumed to lie in ``[0, num_classes)``.
        smooth: Additive smoothing term that avoids division by zero.

    Returns:
        Scalar tensor: one minus the mean Dice coefficient of present classes.
    """
    # Union of the label sets observed in prediction and ground truth.
    present = torch.unique(
        torch.cat((torch.unique(target), torch.unique(pred)), dim=0))

    # Coefficient buffer indexed by class id, on the same device as target.
    coeffs = torch.zeros(num_classes, device=target.device)

    for cls in present:
        pred_mask = (pred == cls).float()
        target_mask = (target == cls).float()

        overlap = (pred_mask * target_mask).sum()
        combined = pred_mask.sum() + target_mask.sum()

        # Smoothed Dice coefficient for this class.
        coeffs[cls] = (2. * overlap + smooth) / (combined + smooth)

    # Average only over the classes that occurred, then convert to a loss.
    return 1 - coeffs[present].mean()


def train(args):
    """Run the full training loop for the selected segmentation model.

    Loads the font dataset, builds the model named by ``args.model``, trains
    it with cross-entropy loss for ``args.epochs`` epochs on ``args.device``
    (falling back to CPU when CUDA is unavailable), saves a full-model
    checkpoint every 50 epochs under ``checkpoint/``, and finally writes a
    training-loss curve as a PNG.

    Args:
        args: argparse.Namespace produced by ``parse_args`` with fields
            ``data_base_url``, ``batch_size``, ``epochs``, ``device``,
            ``learning_rate`` and ``model``.

    Raises:
        ValueError: if ``args.model`` names an unknown model type.
    """
    # Unpack command-line arguments.
    DATA_BASE_URL = args.data_base_url
    BATCH_SIZE = args.batch_size
    EPOCHS = args.epochs
    DEVICE = args.device
    LEARNING_RATE = args.learning_rate
    MODEL = args.model

    # Fall back to CPU when no CUDA device is available.
    IS_USE_GPU = torch.cuda.is_available()
    if IS_USE_GPU:
        print(f"GPU可用，加载命令参数, DEVICE = {DEVICE}")
    else:
        DEVICE = "cpu"
        print(f"GPU不可用，DEVICE 使用cpu, DEVICE = {DEVICE}")
    # Echo the effective configuration.
    print(f"训练参数：")
    print(f"数据集路径 (data_base_url): {DATA_BASE_URL}")
    print(f"批次大小 (batch_size): {BATCH_SIZE}")
    print(f"训练轮数 (epochs): {EPOCHS}")
    print(f"设备类型 (device): {DEVICE}")
    print(f"学习率 (learning_rate): {LEARNING_RATE}")
    print(f"使用的模型 (model): {MODEL}")
    SAVE_NAME = "FiLM-%s-%s" % (MODEL, os.path.basename(DATA_BASE_URL))

    os.makedirs('checkpoint', exist_ok=True)  # ensure the checkpoint dir exists

    TrainDataset = FontDataset(True, DATA_BASE_URL)
    # Dataset iterator.
    train_iter = torch.utils.data.DataLoader(TrainDataset, BATCH_SIZE, shuffle=True, drop_last=True,
                                             collate_fn=custom_collate_fn)
    print("1.数据集加载成功")

    # Model dispatch table: CLI name -> (constructor, display label).
    # The display label preserves the historical log wording ("Unet" for
    # ustroke) exactly as before.
    model_registry = {
        "ustroke": (UStroke, "Unet"),
        "mynet": (mynet, "mynet"),
        "mynet2": (mynet2, "mynet2"),
        "mynet3": (mynet3, "mynet3"),
        "mynet4": (mynet4, "mynet4"),
    }
    try:
        model_cls, model_label = model_registry[MODEL]
    except KeyError:
        raise ValueError(f"不支持的模型类型: {MODEL}") from None
    net = model_cls(num_classes=35)
    print(f"2.使用 {model_label} 网络")

    net.to(DEVICE)  # move the model to the chosen device
    ce_loss_fun = nn.CrossEntropyLoss()
    optimiser = torch.optim.Adam(net.parameters(), lr=LEARNING_RATE)

    # Training loop.
    losses = []
    print("3.开始训练...")
    for epoch in range(EPOCHS):
        print(f'training_epoch {epoch + 1} of {EPOCHS}')

        epoch_losses = []
        for X, Y, C in tqdm(train_iter, desc=f"Epoch {epoch + 1}/{EPOCHS}"):
            # Keep data on the same device as the model.
            X, Y, C = X.to(DEVICE), Y.to(DEVICE), C.to(DEVICE)
            Y_hat = net(X, C)  # forward pass
            ce_loss = ce_loss_fun(Y_hat, Y)
            loss = ce_loss
            optimiser.zero_grad()  # clear accumulated gradients
            loss.backward()        # backpropagate
            optimiser.step()       # update parameters
            epoch_losses.append(loss.item())

        # Mean loss for this epoch.
        epoch_loss = np.mean(epoch_losses)
        losses.append(epoch_loss)
        print(f"Epoch {epoch + 1} Loss: {epoch_loss:.4f}")

        if (epoch + 1) % 50 == 0:
            # Periodic checkpoint.
            # NOTE(review): torch.save(net) pickles the whole model object,
            # tying the checkpoint to this code layout; state_dict() would be
            # more portable, but loaders elsewhere may expect this format —
            # left unchanged.
            torch.save(net, f'checkpoint/{SAVE_NAME}-{epoch + 1}epochs.pt')

    # Plot the loss curve once training is done.
    print("4.训练结束  绘制损失曲线")
    plt.figure(figsize=(10, 5))
    plt.plot(range(1, EPOCHS + 1), losses, linestyle='-', linewidth=1, color='b',
             label="Training Loss")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.title("Training Loss Curve")
    plt.legend()
    plt.grid(True)
    # dpi controls resolution; bbox_inches='tight' avoids clipped labels.
    plt.savefig(f'checkpoint/loss-{SAVE_NAME}-{EPOCHS}.png', dpi=300,
                bbox_inches='tight')
    plt.close()  # release the figure so repeated calls don't leak memory


if __name__ == "__main__":
    # Script entry point: parse the CLI configuration and start training.
    train(parse_args())
