import os
import torch
from models import ResUNet
import torch.nn as nn
import SimpleITK as sitk
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from loss import TverskyLoss
from train_and_test.train_test import train, test, test_average_calculate_all
from sklearn.model_selection import train_test_split
import torch.optim as optim
from sklearn.model_selection import KFold

from txt_save_read import write_things, read_values_from_file, write_lr


# 准备数据 SimpleITK?
# Dataset backed by SimpleITK: loads paired image/mask NIfTI volumes from disk
# and returns them as PyTorch tensors.
class ITKDataset(Dataset):
    def __init__(self, image_paths, mask_paths, transform=None):
        """Store the parallel lists of image/mask file paths.

        Args:
            image_paths: file paths of the input volumes.
            mask_paths: file paths of the label volumes (same order/length).
            transform: optional callable applied to each (image, mask) pair.
        """
        self.image_paths = image_paths
        self.mask_paths = mask_paths
        self.transform = transform

    def __len__(self):
        """Number of samples equals the number of image paths."""
        return len(self.image_paths)

    def __getitem__(self, idx):
        """Load and return one (image, mask) tensor pair."""
        img_file = self.image_paths[idx]
        lbl_file = self.mask_paths[idx]

        # Read both volumes and convert them to numpy arrays.
        img_arr = sitk.GetArrayFromImage(sitk.ReadImage(img_file))
        lbl_arr = sitk.GetArrayFromImage(sitk.ReadImage(lbl_file))

        # Image and label must share the same spatial dimensions; abort otherwise.
        assert img_arr.shape == lbl_arr.shape, "Image and mask must have the same shape"

        # Float tensor for the image; long tensor for the labels
        # (cross-entropy-style losses expect integer class indices).
        image = torch.from_numpy(img_arr).float()
        mask = torch.from_numpy(lbl_arr).long()

        # Optional joint transform (currently not used by callers).
        if self.transform:
            image, mask = self.transform(image, mask)

        return image, mask


# Two loading approaches were tried (two Dataset classes); both yield torch tensors.
# Label (mask) volume directory.
mask_path = 'C:/SYBdataset/dataset/AffinedManualSegImageNIfTI'
# Input image volume directory.
# image_path = 'C:/SYBdataset/dataset/RawImageNIfTI'  # raw (skull not stripped) data path
image_path = 'C:/SYBdataset/qutougu/brain'  # skull-stripped data path
# Sort both listings: os.listdir order is arbitrary/OS-dependent, and images are
# paired with masks BY INDEX below (train_test_split / KFold index both lists in
# parallel), so an unsorted listing could silently pair an image with the wrong
# mask. Assumes matching image/mask files sort into the same relative order —
# TODO confirm against the actual file-naming scheme.
mask_path_files = sorted(
    os.path.join(mask_path, f) for f in os.listdir(mask_path) if f.endswith('.nii.gz')
)
image_path_files = sorted(
    os.path.join(image_path, f) for f in os.listdir(image_path) if f.endswith('.nii.gz')
)

# # 80/20 train/test split, shuffled, random_state=42
# image_train_files, image_test_files, mask_train_files, mask_test_files = train_test_split(
#                                                     image_path_files, mask_path_files, test_size=0.2, shuffle=True, random_state=42)
#
# # Load data via SimpleITK
# train_ITK = ITKDataset(image_train_files, mask_train_files)  # train
# batch_size = 8  # batch size of 8
# dataloader = DataLoader(train_ITK, batch_size=batch_size, shuffle=False)  # training batches, not shuffled
# test_ITK = ITKDataset(image_test_files, mask_test_files)  # test

# GPU setup.
# Restrict the visible GPUs. This must be set BEFORE the first CUDA call —
# the original set it after model.to(cuda), where it has no effect.
# (Comment in the original said "GPU0 and GPU1" although three ids are listed —
# confirm which set is intended; canonical form has no spaces.)
os.environ["CUDA_VISIBLE_DEVICES"] = "0, 1, 2"
device_cuda = torch.device("cuda")  # GPU device handle
device_cpu = torch.device('cpu')  # CPU device handle
# Instantiate the U-Net variant: 1 input channel (grayscale), 3 output channels
# (3-class segmentation), training mode, placed on the GPU.
model = ResUNet(in_channel=1, out_channel=3, training=True).to(device_cuda)

torch.cuda.empty_cache()  # release cached GPU memory

model_path = 'models/unet++_model_V1.0.4(tverskyLoss).pth'  # checkpoint file path
if os.path.isfile(model_path):  # resume from a previously saved checkpoint, if any
    # map_location keeps the load robust regardless of the device the
    # checkpoint was saved from.
    state = torch.load(model_path, map_location=device_cuda)
    # Checkpoints saved from an nn.DataParallel-wrapped model (as done later in
    # this script) carry a "module." key prefix; strip it so the bare model can
    # load them. Plain checkpoints pass through unchanged.
    state = {(k[7:] if k.startswith('module.') else k): v for k, v in state.items()}
    model.load_state_dict(state)
    print('model_save is load')
else:
    print('model_save is not exist')
model = nn.DataParallel(model)  # replicate the model across the visible GPUs
# Loss function and optimizer.
# criterion = nn.CrossEntropyLoss()  # cross-entropy; applies softmax internally
criterion = TverskyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)  # Adam optimizer, lr=1e-4
# Cosine-annealing schedule: period T_max=200 steps, decaying from the initial
# lr (1e-4) down to eta_min=1e-5.
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=200, eta_min=1e-5)


# Quick visual check of slice images with matplotlib. The whole snippet is
# wrapped in a module-level string literal, so it is dead code and never runs.
"""
for images, masks in train_ITK:
    images = (images.permute(1, 2, 0)[113:129])
    masks = (masks.permute(1, 2, 0)[113:129])
    masks = masks.clamp(min=0, max=1)
    masks = masks.numpy()
    for j in range(0, 16):
        fig, axes = plt.subplots(1, 2)  # 建立窗口
        slice_image = images[j, :, :]
        slice_mask = masks[j, :, :]  # 移至CPU
        # 在第一个轴上显示 slice_1 并添加颜色条
        im1 = axes[0].imshow(slice_image, cmap='gray')  # 使用灰度映射显示图片
        cbar1 = plt.colorbar(im1, ax=axes[0])  # 加个灰度的颜色条
        axes[0].set_title("Forecast")

        # 在第二个轴上显示 slice_2 并添加颜色条
        im2 = axes[1].imshow(slice_mask, cmap='gray')  # 使用灰度映射显示图片
        cbar2 = plt.colorbar(im2, ax=axes[1])  # 加个灰度的颜色条
        axes[1].set_title("Label")
        plt.show()
"""

choice = True  # master switch: whether to run the training loop at all
# History lists (could be used for plotting learning curves)
lr_all = []
dice_left_plt = []
dice_right_plt = []
# Per-fold metric buffers for 5-fold cross-validation (one slot per fold)
dice_left_all = torch.zeros((5, 1)).to(device_cuda)
dice_right_all = torch.zeros((5, 1)).to(device_cuda)
jaccard_left_all = torch.zeros((5, 1)).to(device_cuda)
jaccard_right_all = torch.zeros((5, 1)).to(device_cuda)
PPV_left_all = torch.zeros((5, 1)).to(device_cuda)
PPV_right_all = torch.zeros((5, 1)).to(device_cuda)
MSE_left_all = torch.zeros((5, 1)).to(device_cuda)
MSE_right_all = torch.zeros((5, 1)).to(device_cuda)
hd_95_left_all = torch.zeros((5, 1)).to(device_cuda)
hd_95_right_all = torch.zeros((5, 1)).to(device_cuda)

num_epochs_n = 0  # running count of total training rounds
flag_left, flag_right = 0, 0  # "target reached, stop training" flags (only used by code that is commented out below)
flag_left_save, flag_right_save = 0, 0  # "metrics improved, save the model" flags


# 5-fold cross-validation splitter.
# NOTE(review): shuffle=True without random_state means the folds differ on
# every run (and on every pass of the outer while-loop) — confirm intended.
kf = KFold(n_splits=5, shuffle=True)
# test_save(test_ITK, model)

# Main training loop.
# NOTE(review): nothing inside the loop ever sets choice=False (all exit checks
# are commented out), so this loops indefinitely — confirm intended.
while choice:
    # Seed the "best so far" metrics from the previously saved values
    dice_left_max, jaccard_left_max, PPV_left_max, \
    dice_right_max, jaccard_right_max, PPV_right_max = read_values_from_file()
    # Nominal total number of epochs, passed to train() for progress display.
    # NOTE(review): the actual loop below runs 40 epochs — confirm which is intended.
    num_epochs = 200
    # for epoch in range(0, num_epochs):
    # -------- 5-fold cross-validation -------------------------------------------
    for epoch in range(0, 40):
        kf_num = 0
        # Iterate over the folds
        for train_index, test_index in kf.split(image_path_files):
            # Build this fold's train/test file lists (image and mask paired by index)
            img_train, img_test = [image_path_files[i] for i in train_index], [image_path_files[i] for i in test_index]
            mask_train, mask_test = [mask_path_files[i] for i in train_index], [mask_path_files[i] for i in test_index]
            train_ITK = ITKDataset(img_train, mask_train)  # train
            batch_size = 4  # batch size of 4
            dataloader = DataLoader(train_ITK, batch_size=batch_size, shuffle=True)  # shuffled training batches
            test_ITK = ITKDataset(img_test, mask_test)  # test
            # -------- 5-fold cross-validation -------------------------------------------
            print('model begin to train')
            loss_train = train(
                                dataloader,
                                model,
                                criterion,
                                optimizer,
                                device_cuda,
                                epoch,  # *5+kf_num
                                num_epochs
                             )
            print(f'Learning rate: {optimizer.param_groups[0]["lr"]}')  # print the current learning rate
            lr_all.append(optimizer.param_groups[0]["lr"])
            # NOTE(review): this steps the scheduler once per FOLD (5x per epoch),
            # not once per epoch as the original comment claimed — confirm intent.
            scheduler.step()

            # Evaluate on this fold; collect the fold's average metrics
            dice_left, jaccard_left, PPV_left, MSE_left, hd_95_left,\
            dice_right, jaccard_right, PPV_right, MSE_right, hd_95_right \
                = test(
                          test_ITK,
                          device_cuda,
                          model
                     )
            # -------- 5-fold cross-validation -------------------------------------------
            dice_left_all[kf_num] = dice_left
            jaccard_left_all[kf_num] = jaccard_left
            PPV_left_all[kf_num] = PPV_left
            MSE_left_all[kf_num] = MSE_left
            hd_95_left_all[kf_num] = hd_95_left

            dice_right_all[kf_num] = dice_right
            jaccard_right_all[kf_num] = jaccard_right
            PPV_right_all[kf_num] = PPV_right
            MSE_right_all[kf_num] = MSE_right
            hd_95_right_all[kf_num] = hd_95_right
            kf_num += 1
            # -------- 5-fold cross-validation -------------------------------------------

        # -------- 5-fold cross-validation -------------------------------------------
        # NOTE(review): increments by 40 once per epoch; if this is meant to count
        # epochs it should probably be += 1. Only read by commented-out code below.
        num_epochs_n += 40
        print(f'epoch {epoch} result')
        # Average the per-fold metrics over the 5 folds for each hemisphere
        dice_left, jaccard_left, PPV_left, MSE_left, hd_95_left = \
            test_average_calculate_all(
                                        dice_left_all,
                                        jaccard_left_all,
                                        PPV_left_all,
                                        MSE_left_all,
                                        hd_95_left_all,
                                        'left',
                                    )
        dice_right, jaccard_right, PPV_right, MSE_right, hd_95_right = \
            test_average_calculate_all(
                                        dice_right_all,
                                        jaccard_right_all,
                                        PPV_right_all,
                                        MSE_right_all,
                                        hd_95_right_all,
                                        'right',
                                    )
        # -------- 5-fold cross-validation -------------------------------------------
        dice_left_plt.append(dice_left)
        dice_right_plt.append(dice_right)
        # Check whether the cross-validated metrics improved on the best so far
        if dice_left >= dice_left_max and jaccard_left >= jaccard_left_max and PPV_left >= PPV_left_max:
            flag_left_save = 1
        if dice_right >= dice_right_max and jaccard_right >= jaccard_right_max and PPV_right >= PPV_right_max:
            flag_right_save = 1
        if flag_left_save == 1 and flag_right_save == 1:
            # Metrics are checked once per epoch; only the best parameters are kept.
            # The "models" folder must already exist under the current project path.
            # NOTE(review): model is wrapped in nn.DataParallel, so this saves keys
            # with a "module." prefix; loading into a bare model later needs the
            # prefix stripped (or save model.module.state_dict() instead) — confirm.
            torch.save(model.state_dict(), 'models/unet++_model_V1.0.4(tverskyLoss).pth')
            print('unet++_model_parameter is saved\n')
            flag_left_save, flag_right_save = 0, 0
            dice_left_max, jaccard_left_max, PPV_left_max = dice_left, jaccard_left, PPV_left
            dice_right_max, jaccard_right_max, PPV_right_max = dice_right, jaccard_right, PPV_right
            # Persist the new best evaluation metrics
            write_things(dice_left_max, jaccard_left_max, PPV_left_max, dice_right_max, jaccard_right_max, PPV_right_max)
            # Persist the learning rate at this point
            write_lr(optimizer.param_groups[0]["lr"])
        else:
            print('unet++_model_parameter is not saved\n')

        # If results do not reach the target, keep choice=True; otherwise set it
        # False to leave the training loop (currently disabled):
        # if dice_left >= 0.95 and jaccard_left >= 0.95 and PPV_left >= 0.95:
        #     flag_left = 1
        # if dice_right >= 0.95 and jaccard_right >= 0.95 and PPV_right >= 0.95:
        #     flag_right = 1
        # if flag_left == 1 and flag_right == 1:  # exit once both hippocampi reach 95% on dice/jaccard/PPV
        #     choice = False
        # else:  # otherwise keep training
        #     choice = True
    # if num_epochs_n > 400:  # hard stop once the accumulated round count exceeds 400
    #     choice = False

# # Plotting of the per-epoch Dice curves (disabled)
# plt.figure(figsize=(12, 5))
# plt.subplot(1, 2, 1)
# plt.plot(dice_left_plt)
# plt.title('Dice_left-lr')
# plt.xlabel('epoch')
# plt.ylabel('Dice_left')
#
# plt.subplot(1, 2, 2)
# plt.plot(dice_right_plt)
# plt.title('Dice_right-lr')
# plt.xlabel('epoch')
# plt.ylabel('Dice_right')
# plt.show()

print('End of the script.')



