# -*- coding: utf-8 -*-
# Author: IT
# File: train_test
# Created: 2024/5/10 18:58
import os
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import SimpleITK as sitk
from torch.cuda.amp import autocast, GradScaler
from calculate import average_standard, calculate_all, hippocampus_left_right


def train(train_loader,
          model,
          criterion,
          optimizer,
          device,
          epoch,
          num_epochs,
          scaler=None,
          ):
    """Run one training epoch with mixed precision and deep supervision.

    Args:
        train_loader: iterable yielding (images, masks) pairs, each of shape
            (batch_size, 176, 256, 224).
        model: network returning an indexable collection of 4 deep-supervision
            outputs; outputs[3] is the main prediction head.
        criterion: loss function applied to each supervised output.
        optimizer: optimizer updating the model parameters.
        device: torch device the cropped batch tensors are moved to.
        epoch: current epoch index (0-based; printed 1-based).
        num_epochs: total number of epochs, used only for progress printing.
        scaler: optional GradScaler. Pass one created outside the epoch loop so
            the AMP loss-scale state persists across epochs; if None, a fresh
            scaler is created per call (the original behavior, which resets
            the scale every epoch).

    Returns:
        float: loss of the last processed batch, or 0.0 if the loader was empty.
    """
    model.train()  # training mode: enables dropout / batch-norm updates
    if scaler is None:
        # NOTE(review): a per-call scaler resets the AMP scale each epoch;
        # prefer passing a persistent scaler from the training driver.
        scaler = GradScaler()
    last_loss = 0.0  # guards the return value against an empty loader
    for train_files_num, (images, masks) in enumerate(train_loader):
        batch_size = images.shape[0]
        # Randomly shift the 40-slice training window (augmentation).
        random_number = random.randint(0, 10)
        min_range = 90 + random_number
        max_range = min_range + 40

        # Reorder axes, crop the window, and add a channel axis:
        # (batch, 176, 256, 224) -> (batch, 256, 224, 176) -> (batch, 1, 40, 224, 176)
        images = images.permute(0, 2, 3, 1)[:, min_range:max_range].reshape(batch_size, 1, 40, 224, 176).to(device)
        # Same crop for the labels, without a channel axis:
        # (batch, 176, 256, 224) -> (batch, 40, 224, 176)
        masks = (masks.permute(0, 2, 3, 1)[:, min_range:max_range]).reshape(batch_size, 40, 224, 176).to(device)

        # Forward pass (model + loss) under autocast for mixed precision.
        with autocast():
            outputs = model(images)  # 4 outputs, each (batch, 3, 40, 224, 176)

            aplas = 0.4  # weight of the auxiliary deep-supervision losses
            # Loss at each deep-supervision head.
            loss0 = criterion(outputs[0], masks)
            loss1 = criterion(outputs[1], masks)
            loss2 = criterion(outputs[2], masks)
            loss3 = criterion(outputs[3], masks)
            # Total loss: main head plus weighted auxiliary heads.
            loss = loss3 + aplas * (loss0 + loss1 + loss2)

        optimizer.zero_grad()  # clear stale gradients

        # Scale the loss to avoid fp16 gradient underflow, then backprop.
        scaler.scale(loss).backward()
        # step() unscales the gradients first; if any are inf/NaN the
        # optimizer step is skipped so the weights are never corrupted.
        scaler.step(optimizer)
        # Adjust the scale factor for the next iteration.
        scaler.update()

        last_loss = loss.item()
        # Report training progress.
        print(f"Epoch [{epoch + 1}/{num_epochs}], project: {train_files_num}, Loss: {last_loss}")
    return last_loss  # last-batch loss, used for plotting


# Print the evaluation metrics of one test sample
def test_printf(
                dice,
                jaccard,
                PPV,
                MSE,
                hd_95,
                ):
    print(f"Dice_test is {dice}"
          f", jaccard_test is {jaccard}")
    print(f"PPV_test is {PPV}"
          f", MSE_test is {MSE}")
    print(f"hausdorff_distance_test is {hd_95}\n")


# Compute and print the evaluation metrics of one sample
def test_average_calculate(predicted, masks, test_files_num, direction):
    """Compute, print, and return the metrics of one test sample.

    Args:
        predicted: predicted segmentation volume for one side.
        masks: ground-truth segmentation volume for the same side.
        test_files_num: sample index, used only for printing.
        direction: 'left' or 'right', used only for printing.

    Returns:
        (dice, jaccard, PPV, MSE, hd_95) for this sample.
    """
    dice, jaccard, PPV, MSE, hd_95 = calculate_all(predicted, masks)
    print(f"{direction}_test_number : {test_files_num}")
    # Delegate the metric formatting to the shared printer.
    test_printf(dice, jaccard, PPV, MSE, hd_95)
    return dice, jaccard, PPV, MSE, hd_95


# Compute and print the mean and standard deviation of each metric over all samples
def test_average_calculate_all(dice_n, jaccard_n, PPV_n, MSE_n, hd_95_n, direction):
    """Print mean/std of every metric across all samples and return the means.

    Args:
        dice_n, jaccard_n, PPV_n, MSE_n, hd_95_n: per-sample metric collections.
        direction: 'left' or 'right', used only for printing.

    Returns:
        (dice_avg, jaccard_avg, PPV_avg, MSE_avg, hd_95_avg).
    """
    labels = ("Dice", "jaccard", "PPV", "MSE", "hd_95")
    columns = (dice_n, jaccard_n, PPV_n, MSE_n, hd_95_n)
    print(f"test_{direction}:")
    averages = []
    for label, column in zip(labels, columns):
        mean, std = average_standard(column)
        print(f"{label}_average is {mean},{label}_standard is {std}")
        averages.append(mean)
    return tuple(averages)


# Evaluation loop
def test(
          test_loader,
          device,
          model,
         ):
    """Evaluate the model on the test set and report per-side hippocampus metrics.

    Args:
        test_loader: iterable yielding one (images, masks) sample at a time,
            each reshapeable to (176, 256, 224).
        device: torch device used for inference and metric accumulation.
        model: trained network producing (1, 3, 64, 224, 176) class logits.

    Returns:
        Ten averages: (dice, jaccard, PPV, MSE, hd95) for the left hippocampus
        followed by the same five metrics for the right hippocampus.
    """
    num = len(test_loader)  # number of test samples
    # Per-sample metric buffers, one row each. These were originally
    # hard-coded to a CUDA device; they now follow the `device` argument so
    # evaluation also works on CPU-only machines.
    dice_n_left, dice_n_right = torch.zeros((num, 1)).to(device), torch.zeros((num, 1)).to(device)
    jaccard_n_left, jaccard_n_right = torch.zeros((num, 1)).to(device), torch.zeros((num, 1)).to(device)
    PPV_n_left, PPV_n_right = torch.zeros((num, 1)).to(device), torch.zeros((num, 1)).to(device)
    MSE_n_left, MSE_n_right = torch.zeros((num, 1)).to(device), torch.zeros((num, 1)).to(device)
    hd_95_n_left, hd_95_n_right = torch.zeros((num, 1)).to(device), torch.zeros((num, 1)).to(device)
    model.eval()  # evaluation mode: disables dropout, freezes batch-norm stats
    # No gradients are needed during evaluation.
    with torch.no_grad():
        print('model begin to test')
        for test_files_num, data in enumerate(test_loader):  # batch size is 1
            images, masks = data
            images = images.reshape(1, 176, 256, 224)
            masks = masks.reshape(1, 176, 256, 224)

            # Reorder axes, crop a fixed 64-slice window (slices 76:140) and
            # add a channel axis: (1, 176, 256, 224) -> (1, 1, 64, 224, 176)
            images = images.permute(0, 2, 3, 1)[0][76:140].reshape(1, 1, 64, 224, 176).to(device)
            # Same crop for the labels, no channel axis: (1, 64, 224, 176)
            masks = (masks.permute(0, 2, 3, 1)[0][76:140]).reshape(1, 64, 224, 176).to(device)

            outputs = model(images)  # (1, 3, 64, 224, 176) class logits

            # Per-voxel class prediction via argmax over the channel axis.
            predicted = torch.argmax(outputs, dim=1)  # (1, 64, 224, 176)
            predicted = predicted.reshape(64, 224, 176)
            masks = masks.reshape(64, 224, 176)

            # Split prediction and label into left/right hippocampus volumes.
            predicted_left, predicted_right = hippocampus_left_right(predicted)
            masks_left, masks_right = hippocampus_left_right(masks)

            # Compute and print this sample's metrics for each side.
            d_l, j_l, p_l, m_l, h_l = \
                test_average_calculate(
                                        predicted_left,
                                        masks_left,
                                        test_files_num,
                                        'left',
                                        )
            d_r, j_r, p_r, m_r, h_r = \
                test_average_calculate(
                                        predicted_right,
                                        masks_right,
                                        test_files_num,
                                        'right',
                                        )
            # Accumulate the per-sample metrics.
            dice_n_left[test_files_num] = d_l
            jaccard_n_left[test_files_num] = j_l
            PPV_n_left[test_files_num] = p_l
            MSE_n_left[test_files_num] = m_l
            hd_95_n_left[test_files_num] = h_l

            dice_n_right[test_files_num] = d_r
            jaccard_n_right[test_files_num] = j_r
            PPV_n_right[test_files_num] = p_r
            MSE_n_right[test_files_num] = m_r
            hd_95_n_right[test_files_num] = h_r
        # Print and collect the dataset-wide averages for each side.
        d_l_a, j_l_a, p_l_a, m_l_a, h_l_a = test_average_calculate_all(
                                                                        dice_n_left,
                                                                        jaccard_n_left,
                                                                        PPV_n_left,
                                                                        MSE_n_left,
                                                                        hd_95_n_left,
                                                                        'left',
                                                                        )
        d_r_a, j_r_a, p_r_a, m_r_a, h_r_a = test_average_calculate_all(
                                                                        dice_n_right,
                                                                        jaccard_n_right,
                                                                        PPV_n_right,
                                                                        MSE_n_right,
                                                                        hd_95_n_right,
                                                                        'right',
                                                                        )

    return d_l_a, j_l_a, p_l_a, m_l_a, h_l_a, d_r_a, j_r_a, p_r_a, m_r_a, h_r_a




