import os
import sys
from tqdm import tqdm
from tensorboardX import SummaryWriter
import shutil
import argparse
import logging
import time
import random
import numpy as np

import torch
import torch.optim as optim
from torchvision import transforms
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
from torchvision.utils import make_grid

from networks.vnet import VNet
from dataloaders import utils
from utils import ramps, losses
from dataloaders.la_heart import LAHeart, RandomCrop, CenterCrop, RandomRotFlip, ToTensor, TwoStreamBatchSampler

# Modification ideas: 1) replace the MSE consistency loss with a confidence-weighted
# CE consistency loss; 2) estimate uncertainty with an EMA scheme.
# This file implements the EMA-based uncertainty estimation; in the end this approach was not adopted.

parser = argparse.ArgumentParser()
# Path to the training data
parser.add_argument('--root_path', type=str, default='../data/2018LA_Seg_Training Set/', help='Name of Experiment')
parser.add_argument('--exp', type=str,  default='UAMT', help='model_name')  # experiment name, used for the snapshot (model save) path
parser.add_argument('--max_iterations', type=int,  default=100, help='maximum epoch number to train')  # maximum number of training iterations
parser.add_argument('--batch_size', type=int, default=4, help='batch_size per gpu')  # training batch size
parser.add_argument('--labeled_bs', type=int, default=2, help='labeled_batch_size per gpu')  # labeled samples per batch
parser.add_argument('--base_lr', type=float,  default=0.01, help='learning rate')  # base learning rate
parser.add_argument('--deterministic', type=int,  default=1, help='whether use deterministic training')  # enable deterministic training
parser.add_argument('--seed', type=int,  default=1337, help='random seed')  # random seed
parser.add_argument('--gpu', type=str,  default='0', help='GPU to use')  # GPU selection (comma-separated ids)
### costs
parser.add_argument('--ema_decay', type=float,  default=0.99, help='ema_decay')  # EMA decay rate alpha for the teacher update
parser.add_argument('--consistency_type', type=str,  default="mse", help='consistency_type')  # consistency loss type (mse/kl/ce)
parser.add_argument('--consistency', type=float,  default=0.1, help='consistency')  # consistency loss weight lambda
parser.add_argument('--consistency_rampup', type=float,  default=40.0, help='consistency_rampup')  # ramp-up length controlling how lambda grows
args = parser.parse_args()

train_data_path = args.root_path  # training data path
snapshot_path = "../model/" + args.exp + "/"  # model save path


os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu  # restrict visible GPUs
# args.gpu may be e.g. "0,1": the per-GPU batch size is multiplied by the number of
# GPUs to get a larger effective batch size, so each GPU processes a slice of the
# batch in parallel under data parallelism.
batch_size = args.batch_size * len(args.gpu.split(','))
max_iterations = args.max_iterations
base_lr = args.base_lr
labeled_bs = args.labeled_bs

if args.deterministic:
    cudnn.benchmark = False  # disable cuDNN auto-tuning: with benchmark=True, cuDNN
    # tries several convolution algorithms on the first run and picks the fastest for
    # the current hardware/input, which makes runs non-reproducible.
    cudnn.deterministic = True
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)  # seed the CPU RNG
    torch.cuda.manual_seed(args.seed)  # seed the GPU RNG

num_classes = 2  # number of segmentation classes (background / left atrium)
patch_size = (112, 112, 80)  # input patch size; the dataset holds 80 training volumes

def get_current_consistency_weight(epoch):
    """Return the consistency-loss weight lambda for the given (pseudo-)epoch.

    Consistency ramp-up from https://arxiv.org/abs/1610.02242: the weight
    follows a Gaussian/sigmoid warm-up so the consistency term grows smoothly
    from near zero up to ``args.consistency`` over ``args.consistency_rampup``
    steps (e.g. ~0.0006 -> 0.1 over 40 steps) and then stays flat, balancing
    the supervised and consistency objectives during early training.
    """
    rampup_factor = ramps.sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * rampup_factor

def update_ema_variables(model, ema_model, alpha, global_step):
    """Update the teacher (EMA) model's parameters from the student model.

    Uses the true running average until the exponential average is more
    correct: at step 0 the effective decay is 0 (teacher copies the student),
    then it climbs toward ``alpha`` as training progresses.
    """
    # Effective decay: min(alpha, 1 - 1/(step + 1)).
    decay = min(1 - 1 / (global_step + 1), alpha)
    complement = 1 - decay
    for teacher_param, student_param in zip(ema_model.parameters(), model.parameters()):
        # In-place: teacher = decay * teacher + (1 - decay) * student
        teacher_param.data.mul_(decay).add_(student_param.data, alpha=complement)

if __name__ == "__main__":
    ## make logger file
    if not os.path.exists(snapshot_path):
        os.makedirs(snapshot_path)  # create the snapshot (model save) directory
    if os.path.exists(snapshot_path + '/code'):
        shutil.rmtree(snapshot_path + '/code')  # remove any previous code backup
    # Copy the current directory (except .git and __pycache__) into the snapshot's code folder.
    # NOTE(review): shutil.ignore_patterns takes *patterns, not a list — passing a list
    # means nothing is actually ignored here; verify whether .git/__pycache__ get copied.
    shutil.copytree('.', snapshot_path + '/code', shutil.ignore_patterns(['.git','__pycache__']))

    # set up logger
    logging.basicConfig(filename=snapshot_path+"/log.txt", level=logging.INFO,
                        format='[%(asctime)s.%(msecs)03d] %(message)s', datefmt='%H:%M:%S')
    logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
    logging.info(str(args))

    def create_model(ema=False):
        """Build a V-Net on the GPU; for ema=True detach all parameters so the
        teacher is never updated by backprop (only by the EMA update)."""
        # Network definition
        net = VNet(n_channels=1, n_classes=num_classes, normalization='batchnorm', has_dropout=True)
        model = net.cuda()
        if ema:  # teacher model: detach parameters to block gradient updates
            for param in model.parameters():
                param.detach_()
        return model

    model = create_model()  # student model
    ema_model = create_model(ema=True)  # teacher model
    

    db_train = LAHeart(base_dir=train_data_path,
                       split='train',
                       transform = transforms.Compose([
                          RandomRotFlip(),  # random flip/rotation augmentation
                          RandomCrop(patch_size),  # random crop to 112x112x80
                          ToTensor(),  # convert to tensor
                          ]))
    db_test = LAHeart(base_dir=train_data_path,
                       split='test',
                       transform = transforms.Compose([
                           CenterCrop(patch_size),
                           ToTensor()
                       ]))
    labeled_idxs = list(range(16))  # indices of labeled volumes (16 total)
    unlabeled_idxs = list(range(16, 80))  # indices of unlabeled volumes (64 total)
    # Each training batch holds labeled_bs labeled samples and batch_size - labeled_bs unlabeled ones.
    batch_sampler = TwoStreamBatchSampler(labeled_idxs, unlabeled_idxs, batch_size, batch_size-labeled_bs)
    
    # Per-volume EMA-smoothed teacher softmax buffer, shape
    # [num training volumes (80), num classes (2), h, w, d].
    uncertainty=torch.zeros([len(db_train), 2, 112, 112, 80]).cuda()
    def worker_init_fn(worker_id):
        random.seed(args.seed+worker_id)  # deterministic per-worker seed
    # NOTE(review): num_workers=0 means worker_init_fn is never invoked — confirm intent.
    trainloader = DataLoader(db_train, batch_sampler=batch_sampler, num_workers=0, pin_memory=True,worker_init_fn=worker_init_fn)

    # model.train() is normally (re)called before each epoch/batch to guarantee training mode.
    model.train()  # training mode: enables Dropout, BatchNorm statistics updates, etc.
    ema_model.train()
    optimizer = optim.SGD(model.parameters(), lr=base_lr, momentum=0.9, weight_decay=0.0001)

    # Select the consistency criterion by name; all variants apply softmax internally.
    if args.consistency_type == 'mse':
        consistency_criterion = losses.softmax_mse_loss
    elif args.consistency_type == 'kl':
        consistency_criterion = losses.softmax_kl_loss
    elif args.consistency_type == 'ce':
        consistency_criterion = losses.softmax_ce_loss
    else:
        assert False, args.consistency_type

    

    writer = SummaryWriter(snapshot_path+'/log')
    logging.info("{} itertations per epoch".format(len(trainloader)))

    iter_num = 0
    max_epoch = max_iterations//len(trainloader)+1 
    lr_ = base_lr
    model.train()
    for epoch_num in tqdm(range(max_epoch), ncols=70):
        time1 = time.time()
        for i_batch, sampled_batch in enumerate(trainloader):
            time2 = time.time()
            # print('fetch data cost {}'.format(time2-time1))
            # NOTE(review): current_indices may be None if the dataset dicts have no
            # 'idx' key — the zip below would then raise; verify LAHeart returns 'idx'.
            volume_batch, label_batch, current_indices= sampled_batch['image'], sampled_batch['label'],sampled_batch.get('idx', None)
            volume_batch, label_batch = volume_batch.cuda(), label_batch.cuda()
            #print(f"Batch {i_batch}: Current indices: {current_indices}")
            #logging.info("Current batch indices: %s", current_indices)

            noise = torch.clamp(torch.randn_like(volume_batch) * 0.1, -0.2, 0.2)  # clipped Gaussian noise for the teacher input
            ema_inputs = volume_batch + noise
            outputs = model(volume_batch)  # student forward pass
        
            with torch.no_grad():
                ema_output = ema_model(ema_inputs)  # teacher forward pass, [4,2,112,112,80]

            # Uncertainty via MC dropout (original UAMT scheme, kept for reference):
            # T = 8
            # volume_batch_r = volume_batch.repeat(2, 1, 1, 1, 1)  # duplicate the batch to raise throughput
            # stride = volume_batch_r.shape[0] // 2
            # preds = torch.zeros([stride * T, 2, 112, 112, 80]).cuda()
            # for i in range(T//2):  # 8 stochastic forward passes total: doubled batch x 4 iterations
            #     ema_inputs = volume_batch_r + torch.clamp(torch.randn_like(volume_batch_r) * 0.1, -0.2, 0.2)
            #     with torch.no_grad():
            #         preds[2 * stride * i:2 * stride * (i + 1)] = ema_model(ema_inputs)
            # preds = F.softmax(preds, dim=1)
            # preds = preds.reshape(T, stride, 2, 112, 112, 80)
            # preds = torch.mean(preds, dim=0)  #(batch, 2, 112,112,80)
            # uncertainty = -1.0*torch.sum(preds*torch.log(preds + 1e-6), dim=1, keepdim=True)  #(batch, 1, 112,112,80)

            # Uncertainty estimation via EMA of the teacher's softmax predictions.
            # uncertainty buffer: [num training volumes (80), classes (2), h, w, d]
            preds= torch.zeros([volume_batch.shape[0], 2, 112, 112, 80]).cuda()
            preds=F.softmax(ema_output,dim=1)  # preds[batch,2,h,w,c]
            #preds=losses.entropy_loss_map(preds, C=2)#preds[batch,1,h,w,c]

            #al = min(1 - 1 / (iter_num + 1), args.ema_decay)
            consistency_weight = get_current_consistency_weight(iter_num//150)  # lambda
            # Blend the stored per-volume softmax with the current teacher softmax
            # (decay = consistency_weight), then read the smoothed value back.
            # NOTE(review): .mul_/.add_ mutate uncertainty[data_idx] in place before the
            # reassignment; the assignment itself is then redundant — confirm intent.
            for batch_idx,data_idx in zip(range(preds.size(0)),current_indices):
                uncertainty[data_idx] = uncertainty[data_idx].data.mul_(consistency_weight).add_(preds[batch_idx].data, alpha = 1 - consistency_weight)
                preds[batch_idx] = uncertainty[data_idx]

            # Per-voxel predictive entropy of the smoothed softmax: (batch, 1, h, w, d)
            preds = -1.0*torch.sum(preds*torch.log(preds + 1e-6), dim=1, keepdim=True)
            
            ## calculate the loss
            # Supervised losses use only the labeled part of the batch.
            loss_seg = F.cross_entropy(outputs[:labeled_bs], label_batch[:labeled_bs])
            outputs_soft = F.softmax(outputs, dim=1)
            loss_seg_dice = losses.dice_loss(outputs_soft[:labeled_bs, 1, :, :, :], label_batch[:labeled_bs] == 1)

            #consistency_weight = get_current_consistency_weight(iter_num//150)  # lambda
            consistency_dist = consistency_criterion(outputs, ema_output) #(batch, 2, 112,112,80)
            # Entropy threshold ramps from 0.75*ln(2) to ln(2); voxels below it are "certain".
            threshold = (0.75+0.25*ramps.sigmoid_rampup(iter_num, max_iterations))*np.log(2)
            mask = (preds<threshold).float()
            # Masked mean of the consistency distance over certain voxels only.
            consistency_dist = torch.sum(mask*consistency_dist)/(2*torch.sum(mask)+1e-16)
            consistency_loss = consistency_weight * consistency_dist
            loss = 0.5*(loss_seg+loss_seg_dice) + consistency_loss

            optimizer.zero_grad()  # clear accumulated gradients
            loss.backward()  # backpropagate
            optimizer.step()  # update student parameters
            update_ema_variables(model, ema_model, args.ema_decay, iter_num)

            iter_num = iter_num + 1
            # Log scalars to TensorBoard.
            writer.add_scalar('uncertainty/mean', uncertainty[0,0].mean(), iter_num)
            writer.add_scalar('uncertainty/max', uncertainty[0,0].max(), iter_num)
            writer.add_scalar('uncertainty/min', uncertainty[0,0].min(), iter_num)
            writer.add_scalar('uncertainty/mask_per', torch.sum(mask)/mask.numel(), iter_num)
            writer.add_scalar('uncertainty/threshold', threshold, iter_num)
            writer.add_scalar('lr', lr_, iter_num)
            writer.add_scalar('loss/loss', loss, iter_num)
            writer.add_scalar('loss/loss_seg', loss_seg, iter_num)
            writer.add_scalar('loss/loss_seg_dice', loss_seg_dice, iter_num)
            writer.add_scalar('train/consistency_loss', consistency_loss, iter_num)
            writer.add_scalar('train/consistency_weight', consistency_weight, iter_num)
            writer.add_scalar('train/consistency_dist', consistency_dist, iter_num)

            logging.info('iteration %d : loss : %f cons_dist: %f, loss_weight: %f' %
                         (iter_num, loss.item(), consistency_dist.item(), consistency_weight))
            # Periodically log image grids (axial slices 20..60 step 10) to TensorBoard.
            if iter_num % 5 == 0:
                image = volume_batch[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/Image', grid_image, iter_num)

                # image = outputs_soft[0, 3:4, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                image = torch.max(outputs_soft[0, :, :, :, 20:61:10], 0)[1].permute(2, 0, 1).data.cpu().numpy()
                image = utils.decode_seg_map_sequence(image)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('train/Predicted_label', grid_image, iter_num)

                image = label_batch[0, :, :, 20:61:10].permute(2, 0, 1)
                grid_image = make_grid(utils.decode_seg_map_sequence(image.data.cpu().numpy()), 5, normalize=False)
                writer.add_image('train/Groundtruth_label', grid_image, iter_num)

                image = uncertainty[current_indices[0], 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/uncertainty', grid_image, iter_num)

                image = preds[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/preds', grid_image, iter_num)

                mask2 = (preds > threshold).float()
                image = mask2[0, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('train/mask', grid_image, iter_num)
                #####
                image = volume_batch[-1, 0:1, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                grid_image = make_grid(image, 5, normalize=True)
                writer.add_image('unlabel/Image', grid_image, iter_num)

                # image = outputs_soft[-1, 3:4, :, :, 20:61:10].permute(3, 0, 1, 2).repeat(1, 3, 1, 1)
                image = torch.max(outputs_soft[-1, :, :, :, 20:61:10], 0)[1].permute(2, 0, 1).data.cpu().numpy()
                image = utils.decode_seg_map_sequence(image)
                grid_image = make_grid(image, 5, normalize=False)
                writer.add_image('unlabel/Predicted_label', grid_image, iter_num)

                image = label_batch[-1, :, :, 20:61:10].permute(2, 0, 1)
                grid_image = make_grid(utils.decode_seg_map_sequence(image.data.cpu().numpy()), 5, normalize=False)
                writer.add_image('unlabel/Groundtruth_label', grid_image, iter_num)

            ## change lr
            # Step-decay: divide the learning rate by 10 every 2500 iterations.
            if iter_num % 2500 == 0:
                lr_ = base_lr * 0.1 ** (iter_num // 2500)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr_
            # Save a checkpoint every 1000 iterations.
            if iter_num % 1000 == 0:
                save_mode_path = os.path.join(snapshot_path, 'iter_' + str(iter_num) + '.pth')
                torch.save(model.state_dict(), save_mode_path)
                logging.info("save model to {}".format(save_mode_path))

            if iter_num >= max_iterations:
                break
            time1 = time.time()
        if iter_num >= max_iterations:
            break
    # Final checkpoint after training completes.
    save_mode_path = os.path.join(snapshot_path, 'iter_'+str(max_iterations)+'.pth')
    torch.save(model.state_dict(), save_mode_path)
    logging.info("save model to {}".format(save_mode_path))
    writer.close()
