import os
import argparse
from torch.utils.data import DataLoader
import torch
import torch.optim as optim
from tqdm import tqdm
from BraTS import *
from models.Unet import UNet
from utils import cosine_scheduler,cal_dice,Loss

def parse_args(argv=None):
    """Parse command-line arguments for BraTS training.

    Args:
        argv: Optional list of argument strings. When None (the default),
            argparse falls back to ``sys.argv[1:]`` — identical to the
            previous behavior. Passing an explicit list makes the function
            testable and safe to call from other entry points.

    Returns:
        argparse.Namespace holding hyper-parameters and dataset/output paths.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_classes', type=int, default=4)
    parser.add_argument('--seed', type=int, default=21)
    parser.add_argument('--epochs', type=int, default=80)
    parser.add_argument('--warmup_epochs', type=int, default=10)
    parser.add_argument('--batch_size', type=int, default=1)
    parser.add_argument('--lr', type=float, default=0.003)
    parser.add_argument('--min_lr', type=float, default=0.001)
    # Dataset root and the txt files listing the train/valid/test splits.
    parser.add_argument('--data_path', type=str,
                        default='/root/autodl-tmp/BrainTumorSegmentation1/data_set/BraTS2021/dataset')
    parser.add_argument('--train_txt', type=str,
                        default='/root/autodl-tmp/BrainTumorSegmentation1/data_set/BraTS2021/train.txt')
    parser.add_argument('--valid_txt', type=str,
                        default='/root/autodl-tmp/BrainTumorSegmentation1/data_set/BraTS2021/valid.txt')
    parser.add_argument('--test_txt', type=str,
                        default='/root/autodl-tmp/BrainTumorSegmentation1/data_set/BraTS2021/test.txt')
    # Output locations: text training log, best-model weights, per-epoch checkpoints.
    parser.add_argument('--train_log', type=str,
                        default='/root/autodl-tmp/BrainTumorSegmentation1/result/UNet1.txt')
    parser.add_argument('--weights', type=str,
                        default='/root/autodl-tmp/BrainTumorSegmentation1/result/UNet1.pth')
    parser.add_argument('--save_path', type=str,
                        default='/root/autodl-tmp/BrainTumorSegmentation1/checkpoint/UNet1')

    args = parser.parse_args(argv)
    return args


def train_loop(model, optimizer, scheduler, criterion, train_loader, device, epoch):
    """Run one training epoch and return epoch-averaged metrics.

    The learning rate is driven per-iteration from a precomputed schedule
    indexed by the global step ``len(train_loader) * epoch + local_step``.

    Args:
        model: network mapping [b,4,D,H,W] images to [b,4,D,H,W] logits.
        optimizer: optimizer to step; its lr is overwritten each iteration.
        scheduler: sequence of learning rates indexed by global step.
        criterion: loss callable taking (outputs, masks).
        train_loader: DataLoader yielding (images, masks) batches.
        device: device to move each batch to.
        epoch: zero-based epoch index (used to compute the global step).

    Returns:
        dict with epoch-averaged 'loss', 'diceET', 'diceTC', 'diceWT'.
    """
    model.train()
    running_loss = 0.0   # accumulates per-batch loss for the epoch average
    diceET_train = 0.0   # accumulates ET Dice per batch
    diceTC_train = 0.0   # accumulates TC Dice per batch
    diceWT_train = 0.0   # accumulates WT Dice per batch
    pbar = tqdm(train_loader)
    for it, (images, masks) in enumerate(pbar):
        # Global step into the precomputed lr schedule (no shadowing of `it`).
        step = len(train_loader) * epoch + it
        # BUGFIX: apply the scheduled lr to EVERY param group, not just the
        # first. The optimizer is created with lr=0, so any extra group
        # would otherwise never receive a non-zero learning rate.
        for param_group in optimizer.param_groups:
            param_group['lr'] = scheduler[step]

        # images: [b,4,D,H,W], masks: [b,D,H,W]
        images, masks = images.to(device), masks.to(device)
        outputs = model(images)  # raw logits, [b,4,D,H,W]
        loss = criterion(outputs, masks)
        diceET, diceTC, diceWT = cal_dice(outputs, masks)
        pbar.desc = "loss: {:.3f} ".format(loss.item())

        running_loss += loss.item()
        diceET_train += diceET.item()
        diceTC_train += diceTC.item()
        diceWT_train += diceWT.item()

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    n_batches = len(train_loader)
    return {'loss': running_loss / n_batches,
            'diceET': diceET_train / n_batches,
            'diceTC': diceTC_train / n_batches,
            'diceWT': diceWT_train / n_batches}


def val_loop(model, criterion, val_loader, device):
    """Evaluate the model over ``val_loader`` without gradient tracking.

    Returns a dict with the mean 'loss', 'diceET', 'diceTC', 'diceWT'
    averaged over all batches.
    """
    model.eval()
    totals = {'loss': 0.0, 'diceET': 0.0, 'diceTC': 0.0, 'diceWT': 0.0}
    progress = tqdm(val_loader)
    with torch.no_grad():
        for images, masks in progress:
            images = images.to(device)
            masks = masks.to(device)
            outputs = model(images)
            batch_loss = criterion(outputs, masks)
            et, tc, wt = cal_dice(outputs, masks)

            totals['loss'] += batch_loss.item()
            totals['diceET'] += et.item()
            totals['diceTC'] += tc.item()
            totals['diceWT'] += wt.item()

    batch_count = len(val_loader)
    return {name: value / batch_count for name, value in totals.items()}


# 训练函数 在给定的训练数据集和验证数据集上训练模型，并在每个 epoch 结束后进行评估
def train(args, model, optimizer, scheduler, criterion, train_loader,
          val_loader, epochs, device, train_log, valid_loss_min=999.0):
    """Train for ``epochs`` epochs, validating and checkpointing each epoch.

    Per epoch: run one training pass and one validation pass, print and
    append the metrics to ``train_log``, then save model+optimizer state.
    The epoch with the lowest validation loss so far is saved to
    ``args.weights``; all other epochs go to ``args.save_path``.

    Args:
        args: parsed CLI namespace (uses args.save_path and args.weights).
        valid_loss_min: initial best validation loss; epochs must beat this
            to be saved as the best model.
    """
    for e in range(epochs):
        # One training pass followed by one validation pass.
        train_metrics = train_loop(model, optimizer, scheduler, criterion, train_loader, device, e)
        val_metrics = val_loop(model, criterion, val_loader, device)

        # Losses and per-class Dice scores for this epoch.
        info1 = "Epoch:[{}/{}] train_loss: {:.3f} valid_loss: {:.3f} ".format(e+1, epochs, train_metrics["loss"], val_metrics["loss"])
        info2 = "Train--ET: {:.3f} TC: {:.3f} WT: {:.3f} ".format(train_metrics['diceET'], train_metrics['diceTC'], train_metrics['diceWT'])
        info3 = "Valid--ET: {:.3f} TC: {:.3f} WT: {:.3f} ".format(val_metrics['diceET'], val_metrics['diceTC'], val_metrics['diceWT'])
        print(info1)
        print(info2)
        print(info3)
        # Append this epoch's metrics to the training log file.
        with open(train_log, 'a') as f:
            f.write(info1 + '\n' + info2 + ' ' + info3 + '\n')

        os.makedirs(args.save_path, exist_ok=True)
        save_file = {"model": model.state_dict(),
                     "optimizer": optimizer.state_dict()}
        if val_metrics['loss'] < valid_loss_min:
            valid_loss_min = val_metrics['loss']
            # BUGFIX: save the best model to args.weights instead of a
            # hard-coded absolute path; the default of --weights is the
            # same path, so default behavior is unchanged, but the flag
            # is no longer silently ignored.
            torch.save(save_file, args.weights)
        else:
            torch.save(save_file, os.path.join(args.save_path, 'checkpoint{}.pth'.format(e+1)))
    print("Finished Training!")

def main():
    """Wire up data, model, optimizer and scheduler, then train and evaluate."""
    args = parse_args()
    # Seed CPU and all GPU RNGs so runs are reproducible.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Volumetric patch size fed to the network.
    patch_size = (160, 160, 128)

    # Training split: augmentation (flip/rotate, noise) plus random crop.
    train_dataset = BraTS(args.data_path, args.train_txt, transform=transforms.Compose([
        RandomRotFlip(),
        RandomCrop(patch_size),
        GaussianNoise(p=0.1),
        ToTensor()
    ]))
    # Validation split: deterministic center crop only.
    val_dataset = BraTS(args.data_path, args.valid_txt, transform=transforms.Compose([
        CenterCrop(patch_size),
        ToTensor()
    ]))
    # Test split: same deterministic pipeline as validation.
    test_dataset = BraTS(args.data_path, args.test_txt, transform=transforms.Compose([
        CenterCrop(patch_size),
        ToTensor()
    ]))

    train_loader = DataLoader(dataset=train_dataset, batch_size=args.batch_size,
                              num_workers=4, shuffle=True, pin_memory=True)
    val_loader = DataLoader(dataset=val_dataset, batch_size=args.batch_size,
                            num_workers=4, shuffle=False, pin_memory=True)
    test_loader = DataLoader(dataset=test_dataset, batch_size=args.batch_size,
                             num_workers=4, shuffle=False, pin_memory=True)

    print("using {} device.".format(device))
    print("using {} images for training, {} images for validation.".format(len(train_dataset), len(val_dataset)))

    # BraTS labels: 1 = necrotic tumor core (NT), 2 = peritumoral edema (ED),
    # 4 = enhancing tumor (ET).
    # Reported metrics: ET (label 4), TC (labels 1+4), WT (labels 1+2+4).
    model = UNet(in_channels=4, num_classes=4).to(device)
    criterion = Loss(n_classes=4, weight=torch.tensor([0.2, 0.3, 0.25, 0.25])).to(device)
    # lr starts at 0 here; the per-iteration cosine schedule below drives it.
    optimizer = optim.SGD(model.parameters(), momentum=0.9, lr=0, weight_decay=5e-4)
    scheduler = cosine_scheduler(base_value=args.lr, final_value=args.min_lr,
                                 epochs=args.epochs, niter_per_ep=len(train_loader),
                                 warmup_epochs=args.warmup_epochs, start_warmup_value=5e-4)

    # Resume from an existing checkpoint when one is present at args.weights.
    if os.path.exists(args.weights):
        checkpoint = torch.load(args.weights, map_location=device)
        model.load_state_dict(checkpoint['model'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print('Successfully loading checkpoint.')

    train(args=args, model=model, optimizer=optimizer, scheduler=scheduler,
          criterion=criterion, train_loader=train_loader, val_loader=val_loader,
          epochs=args.epochs, device=device, train_log=args.train_log)

    # Final evaluation of validation and test sets using the parameters
    # left in the model after the last training epoch.
    metrics2 = val_loop(model, criterion, val_loader, device)
    metrics3 = val_loop(model, criterion, test_loader, device)

    print("Valid -- loss: {:.3f} ET: {:.3f} TC: {:.3f} WT: {:.3f}".format(metrics2['loss'], metrics2['diceET'], metrics2['diceTC'], metrics2['diceWT']))
    print("Test  -- loss: {:.3f} ET: {:.3f} TC: {:.3f} WT: {:.3f}".format(metrics3['loss'], metrics3['diceET'], metrics3['diceTC'], metrics3['diceWT']))

    torch.cuda.empty_cache()  # release cached CUDA memory before exiting

# Script entry-point guard: run training only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()


