import os
import torch

from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import torch.nn.functional as F
from utils import  transfunc
from torch import nn
from utils.contra_loss import ContrastLoss
from utils.supcon_loss import LocalConLoss
from utils.loss import DiceLossMulti
import time
import numpy as np
import metrics

def color_string(s, front=50, word=32):
    """Wrap *s* in ANSI SGR escape codes to colorize terminal output.

    :param s: text to colorize
    :param front: ANSI background code (default 50)
    :param word: ANSI text color code (default 32, green)
    :return: *s* wrapped in ``\\033[0;<word>;<front>m ... \\033[0m``
    """
    return f"\033[0;{int(word)};{int(front)}m{s}\033[0m"


class Trainer(object):
    """Training/validation driver for a contrastive segmentation model.

    Combines a segmentation loss (CE / Dice / both, chosen by
    ``args.loss_seg_strategy``) with a pixel-contrastive loss (chosen by
    ``args.loss_contra_strategy`` / ``args.loss_strategy`` -- see
    NOTE(review) in ``train``), logs every loss component to TensorBoard,
    periodically validates, and checkpoints the best-mean-Dice model.
    """

    def __init__(self, model, criterion, optimizer, scheduler, metrics, train_loader, val_loader, args, logger):
        """Store the training components and set up losses + TensorBoard.

        :param model: segmentation network; its forward returns a triple
            (preds, sdm_out_tanh, seg_contra) -- see ``train``/``val``.
        :param criterion: loss container exposing ``crossEntropy_loss`` and
            ``contras_loss_mean_1layer`` / ``contras_loss_mean_3layer``.
        :param optimizer: torch optimizer.
        :param scheduler: LR scheduler stepped with the validation Dice
            (presumably ReduceLROnPlateau -- confirm; see ``train``).
        :param metrics: callable (pred_mask, gt_mask) -> (dice, iou, hd95).
        :param train_loader: training loader yielding (images, masks) batches.
        :param val_loader: validation loader yielding (images, masks) batches.
        :param args: experiment configuration namespace.
        :param logger: logging.Logger-like object.
        """
        # self.model = model.to(args.device)
        self.model = model.cuda()
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.criterion = criterion
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.metrics = metrics
        self.args = args
        self.logger = logger
        self.contra_pixel_loss = ContrastLoss()
        self.dice_loss_multi = DiceLossMulti(args.num_classes)

        self.contra_coseg_loss = LocalConLoss(temperature=0.07,stride=4)

        if not os.path.exists(args.tensorboard_dir):
            os.makedirs(args.tensorboard_dir)

        self.writer = SummaryWriter(log_dir=args.tensorboard_dir)

        self.inputs = next(iter(self.train_loader))[0]  # one image batch: (batch_size, channels, size_h, size_w)

        # trace the model graph for TensorBoard on one sample batch
        self.writer.add_graph(self.model, self.inputs.to(self.args.device, dtype=torch.float32))

        # wrap AFTER add_graph so the graph is traced on the bare model
        if self.args.DataParallel:
            self.model = torch.nn.DataParallel(self.model)

    def preload(self):
        """Load pretrained weights from ``args.pretrain_file`` when enabled.

        No-op when ``args.pretrained`` is not True or the file is missing.
        """
        if self.args.pretrained is True and os.path.isfile(self.args.pretrain_file):
            self.model.load_state_dict(torch.load(self.args.pretrain_file))
            print("succeed to load pretrained file!")
            self.logger.info(color_string(f'load weights:{self.args.pretrain_file} finish!', word=36))

    def train(self):
        """Run the full training loop.

        Per batch: forward, build seg + contrastive losses per the configured
        strategies, backward with gradient value clipping, and log each loss
        component to TensorBoard. Every ``args.val_epoch`` epochs run ``val``
        and checkpoint when the mean Dice improves; every
        ``args.save_model_epoch`` epochs save an unconditional checkpoint.
        """
        self.preload()
        epochs = self.args.epochs  # e.g. 10
        n_train = len(self.train_loader)
        # print("n_train=",n_train) #3
        save_time = transfunc.AverageMeter()
        step = 0
        best_score = 0.

        # print(f'{self.args.save_path}/{self.args.current_time}_best_model.pth')
        for epoch in range(epochs):
            self.model.train()
            # training
            torch.cuda.empty_cache()
            end = time.time()
            with tqdm(total=n_train, desc=f'Epoch {epoch + 1}/{epochs}', unit='batch',ncols=200) as pbar:
                # assumed train_loader nesting: [3,2,8,1,256,256] or [3,2,5,1,256,256] -- TODO confirm
                for batch in self.train_loader:
                    print("\n第{0}步".format(step+1))

                    images, masks = batch[0], batch[1]
                    # print("train_loader的shape:[{0},{1},{2},{3},{4},{5}]".format(len(self.train_loader),len(batch),len(images),len(images[0]),len(images[0][0]),len(images[0][0][0])))
                    # print("train batch的length ",len(batch))
                    # print("images 的shape ",images.shape)
                    # print("type image={0}  mask={1}".format(type(images),type(masks)))
                    # breast shape image=torch.Size([8, 1, 256, 256])  mask=torch.Size([8, 1, 256, 256])
                    # ACDC shape image=torch.Size([8, 1, 256, 256])  mask=torch.Size([8, 1, 256, 256])
                    # print("shape image={0}  mask={1}".format(images.shape, masks.shape))
                    # print("image.uniq=", images.unique())
                    # print("masks.uniq=", masks.unique())
                    images = images.to(device=self.args.device, dtype=torch.float32)
                    # print("mask.unique", masks.unique())
                    masks = masks.to(device=self.args.device, dtype=torch.long)


                    # print("mask.unique",masks.unique())
                    # exit()
                    self.optimizer.zero_grad()
                    # model forward returns (segmentation logits, tanh SDM head, contrastive head)
                    preds,sdm_out_tanh,seg_contra  = self.model(images)

                    head_contra = seg_contra
                    # L2-normalize the contrastive head features along the channel dim
                    seg_contra = F.normalize(seg_contra, p=2, dim=1)

                    # Default zero placeholders so every component can always be logged.
                    # NOTE(review): these tensors live on CPU and carry no grad; if a
                    # branch leaves one untouched, `loss` may not require grad -- which
                    # is presumably why `loss.requires_grad_(True)` is forced below.
                    constra_loss = torch.tensor(0.0)
                    ce_loss = torch.tensor(0.0)
                    dice_loss = torch.tensor(0.0)
                    seg_loss = torch.tensor(0.0)
                    if self.args.loss_seg_strategy == "ce_loss":
                        # CrossEntropyLoss expects input (N, C, d1, ..., dK) and a
                        # class-index target (N, d1, ..., dK), hence masks.squeeze(1):
                        # preds [8, 2, 256, 256] vs masks.squeeze(1) [8, 256, 256].
                        ce_loss = self.criterion.crossEntropy_loss(preds, masks.squeeze(1))
                        seg_loss = ce_loss
                    elif self.args.loss_seg_strategy == "dice_loss":
                        dice_loss = self.dice_loss_multi(preds,masks)
                        seg_loss = dice_loss
                    elif self.args.loss_seg_strategy == "ce_dice_loss":

                        # cross-entropy loss on class-index targets
                        ce_loss = self.criterion.crossEntropy_loss(preds, masks.squeeze(1))
                        # input shapes: preds [16, 4, 256, 256], masks [16, 1, 256, 256]
                        dice_loss = self.dice_loss_multi(preds, masks)
                        seg_loss = 0.5 * (ce_loss + dice_loss)
                    else :
                        # unknown seg-loss combination: log and abort the run
                        self.logger.info(Exception("没有找到这种 seg loss组合"))
                        exit()

                    if self.args.loss_contra_strategy == "circle_contra_loss":

                        # Convert the GT seg map to a signed-distance map normalized to [-1, 1]
                        # print("mask_batch.shape",mask_batch.shape)
                        with torch.no_grad():
                            # out_shape = (batchsize, h, w); masks.shape = [4, 1, 256, 256]
                            sdm_normalize, real_gt_sdf = transfunc.img_to_sdf(masks.cpu().numpy(),out_shape=masks[:, 0, ...].shape)
                            sdm_normalize = torch.from_numpy(sdm_normalize).float().cuda()
                            real_gt_sdf = torch.from_numpy(real_gt_sdf).float().cuda()
                        if self.args.circle_contra_type == "1_layer_cross_image":
                            constra_loss = self.criterion.contras_loss_mean_1layer(real_gt_sdf, seg_contra)
                        else :
                            constra_loss = self.criterion.contras_loss_mean_3layer(real_gt_sdf, seg_contra)

                    # NOTE(review): the branches below test `args.loss_strategy`, while
                    # the branch above tests `args.loss_contra_strategy` -- confirm this
                    # attribute mismatch is intentional and both exist on args.
                    elif self.args.loss_strategy == "local_contra_loss":

                        # Local contrastive strategy (LocalContrastive).
                        # torch.split(tensor, split_size, dim=): split_size is the size
                        # of each chunk, NOT the number of chunks.
                        # dim 0 holds bsz + bsz samples; unpack the two prediction views.
                        bsz = masks.shape[0] // 2
                        f1, f2 = torch.split(seg_contra, [bsz, bsz], dim=0)
                        seg_contra = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)],dim=1)  # [bsz, n_view, c, img_size, img_size]
                        # unpack the labels the same way
                        l1, l2 = torch.split(masks, [bsz, bsz], dim=0)
                        labels = torch.cat([l1.unsqueeze(1), l2.unsqueeze(1)], dim=1)
                        # NOTE(review): `labels` is built but never passed to the loss --
                        # verify LocalConLoss is meant to run label-free here.
                        constra_loss = self.contra_coseg_loss(seg_contra)
                    elif self.args.loss_strategy == "mask_contra_loss" :
                        # print("mask==",masks.shape)
                        # print("preds==", preds.shape)
                        # print("seg_contra==", seg_contra.shape)

                        # mask-guided contrast: (pred_seg, pred_embed, target, with_embed=True)
                        constra_loss = self.contra_pixel_loss(preds,seg_contra,masks)
                    else :
                        # unknown contrastive-loss combination: log and abort the run
                        self.logger.info(Exception("没有找到这种 contra loss组合"))
                        exit()

                    loss = constra_loss + seg_loss
                    # NOTE(review): forcing requires_grad on the summed loss is a red
                    # flag -- if `loss` did not already require grad, backward() yields
                    # no parameter gradients anyway. Worth confirming upstream.
                    loss.requires_grad_(True)

                    loss.backward()
                    # gradient clipping: clamp each gradient element to [-0.1, 0.1]
                    torch.nn.utils.clip_grad_value_(self.model.parameters(), 0.1)
                    self.optimizer.step()

                    # single-loss logging variant (kept for reference)
                    # self.writer.add_scalar('{0} Loss/train'.format(self.args.mode_task), loss.item(), step)
                    # pbar.set_postfix(**{'{0} loss(batch)'.format(self.args.mode_task): loss.item()})

                    # multi-loss logging: every component gets its own scalar curve
                    self.writer.add_scalar('{0} Loss/train'.format(self.args.mode_task), loss.item(), step)
                    self.writer.add_scalar('{0} seg_loss/train'.format(self.args.mode_task), seg_loss.item(), step)
                    self.writer.add_scalar('{0} ce_loss/train'.format(self.args.mode_task), ce_loss.item(), step)
                    self.writer.add_scalar('{0} dice_loss/train'.format(self.args.mode_task), dice_loss.item(), step)
                    self.writer.add_scalar('{0} constra_loss/train'.format(self.args.mode_task), constra_loss.item(),step)
                    # NOTE(review): '(batch)-> loss(batch)' contains no {} placeholder,
                    # so .format(self.args.mode_task) is a no-op -- likely a typo.
                    pbar.set_postfix(**{'(batch)-> loss(batch)'.format(self.args.mode_task): loss.item(), 'seg_loss': seg_loss.item(),'ce_loss': ce_loss.item(),'dice_loss': dice_loss.item(),'constra_loss(batch)': constra_loss.item(),'lr':self.optimizer.param_groups[0]['lr']})
                    pbar.update(1)
                    step = step + 1
                save_time.update(val=time.time() - end)
            # validate every args.val_epoch epochs (e.g. val_epoch == 4)
            if (epoch + 1) % self.args.val_epoch == 0:
                score_list,number = self.val()
                # mean Dice: average column 0 of the per-class [dice, iou, hd95] means
                dice_score = np.mean(score_list/number,axis=0)[0]
                if dice_score > best_score:
                    best_score = dice_score
                    if self.args.save_path:
                        if not os.path.exists(self.args.save_path):
                            os.makedirs(self.args.save_path)
                        torch.save(self.model.state_dict(), f'{self.args.save_path}/{self.args.current_time}_best_model.pth')
                        self.logger.info(color_string(f'best model saved !', word=33))
                    # save the contrastive head output (disabled)
                    # if self.args.save_path:
                    #     if not os.path.exists(self.args.save_path):
                    #         os.makedirs(self.args.save_path)
                    #     torch.save(head_contra, f'{self.args.save_path}/{self.args.current_time}_head_{epoch + 1}.pth')
                if self.args.task_name == "breast_UNet":
                    # binary task: single foreground class, columns are [dice, iou, hd95]
                    iou_score = np.mean(score_list/number,axis=0)[1]
                    hd95_score = np.mean(score_list/number,axis=0)[2]
                    self.logger.info(f'Epoch-{epoch + 1} {"mean_dice"}:{dice_score},{"mean_iou"}:{iou_score},{"mean_hd95"}:{hd95_score}  {self.args.val_epoch}_epoch_time:{save_time.val}')
                    save_time.reset()
                    self.writer.add_scalar(f'Val_best_dice', best_score, step)
                    self.writer.add_scalar(f'Val_dice', dice_score, step)
                    self.writer.add_scalar(f'Val_iou', iou_score, step)
                    self.writer.add_scalar(f'Val_hd95', hd95_score, step)

                elif self.args.task_name == "ACDC_UNet":
                    # multi-class task: log overall means plus per-class curves
                    dice_score = np.mean(score_list/number,axis=0)[0]
                    iou_score = np.mean(score_list/number,axis=0)[1]
                    hd95_score = np.mean(score_list/number,axis=0)[2]
                    classscore_list = score_list / number
                    self.logger.info(f'Epoch-{epoch + 1} {"Total_mean:Dice/Iou/hd95"}:{dice_score}/{iou_score}/{hd95_score} {"---1_mean:Dice/Iou/hd95"}:{classscore_list[0][0]}/{classscore_list[0][1]}/{classscore_list[0][2]}  {"---2_mean:Dice/Iou/hd95"}:{classscore_list[1][0]}/{classscore_list[1][1]}/{classscore_list[1][2]} {"---3_mean:Dice/Iou/hd95"}:{classscore_list[2][0]}/{classscore_list[2][1]}/{classscore_list[2][2]}   {self.args.val_epoch}_epoch_time:{save_time.val}')
                    save_time.reset()

                    # '1': "---", 'Dice': score_mean[0][0], 'IoU': score_mean[0][1], 'hd95': score_mean[0][2],
                    #                     # '2': "---", 'Dice': score_mean[1][0], 'IoU': score_mean[1][1], 'hd95': score_mean[1][2],
                    #                     # '3': "---", 'Dice': score_mean[2][0], 'IoU': score_mean[2][1], 'hd95': score_mean[2][2],
                    #                     # 'mean': "---", 'Dice': np.mean(score_mean, axis=0)[0], 'IoU': np.mean(score_mean, axis=0)[
                    #                     #     1], 'hd95': np.mean(score_mean, axis=0)[2]

                    for class_i in range(self.args.num_classes-1):
                        self.writer.add_scalar('info/val_{}_dice'.format(class_i + 1), classscore_list[class_i, 0], step)
                        self.writer.add_scalar('info/val_{}_iou'.format(class_i + 1), classscore_list[class_i, 1], step)
                        self.writer.add_scalar('info/val_{}_hd95'.format(class_i + 1), classscore_list[class_i, 2], step)


                    self.writer.add_scalar(f'Val_best_dice', best_score, step)
                    self.writer.add_scalar(f'Val_dice', dice_score, step)
                    self.writer.add_scalar(f'Val_iou', iou_score, step)
                    self.writer.add_scalar(f'Val_hd95', hd95_score, step)
                else :
                    # unknown task name: abort silently
                    exit()

                self.writer.add_scalar('learning_rate', self.optimizer.param_groups[0]['lr'], step)

                if self.args.save_pred_img:
                    # log the last training batch plus its predicted masks as images
                    self.writer.add_images('images', images, step)
                    self.writer.add_images('masks/true', masks, step)
                    self.writer.add_images('masks/pred', preds.argmax(dim=1, keepdim=True), step)
                # scheduler is stepped with the validation Dice (plateau-style) --
                # assumes a metric-driven scheduler; TODO confirm
                self.scheduler.step(dice_score)

            # unconditional periodic checkpoint (e.g. save_model_epoch = 9999 to disable)
            if (epoch + 1) % self.args.save_model_epoch == 0:
                if self.args.save_path:
                    if not os.path.exists(self.args.save_path):
                        os.makedirs(self.args.save_path)
                    model_name = f'{self.args.model}_{self.args.dataset}'
                    torch.save(self.model.state_dict(),f'{self.args.save_path}/{model_name}_{epoch + 1}.pth')
                    self.logger.info(color_string(f'Checkpoint {epoch + 1} saved !'))
        self.writer.close()

    def val(self):
        """Evaluate on the validation set.

        :return: tuple ``(score_list, number)`` where ``score_list`` is a
            (num_classes-1, 3) array accumulating per-class [dice, iou, hd95]
            sums over all slices and ``number`` is the slice count -- callers
            divide the two to obtain per-class means.
        """
        self.model.train(False)  # redundant with eval() below; kept as-is
        self.model.eval()

        val_len = len(self.val_loader)
        print("\nval data length=",val_len)
        batch_dice_score = 0.
        batch_iou_score = 0.0
        batch_hd95_score = 0.0
        number = 0
        # starts as a scalar; becomes an ndarray on the first `+= np.array(...)`
        score_list = 0.0
        # `with` works on objects defining __enter__() / __exit__()
        with torch.no_grad():
            with tqdm(total=val_len, desc=f'val', unit='batch',ncols=200) as pbar:
                for batch in self.val_loader:
                    # print("\nnumber=",number)
                    images, masks = batch[0], batch[1]
                    images = images.to(self.args.device)
                    masks = masks.to(self.args.device)
                    # print("validate predshape ",images.shape)

                    preds, sdm_out_tanh, seg_contra = self.model(images)

                    # logits -> hard class labels, channel dim kept
                    preds = preds.argmax(dim=1, keepdim=True)
                    # print("validate predshape ",preds.shape)

                    preds = preds.data.cpu().numpy()
                    masks = masks.data.cpu().numpy()

                    # shape  preds=(16, 1, 256, 256)  masks=(16, 1, 256, 256)
                    # print("shape  preds={0}  masks={1}".format(preds.shape,masks.shape))
                    # exit()

                    batch_score_i = 0.0
                    # score each slice of the batch individually (`_` is the slice index)
                    for _ in range(preds.shape[0]):
                        # reshape(-1): flatten to 1-D
                        pred_tmp = preds[_, :].reshape(-1)
                        mask_tmp = masks[_, :].reshape(-1)
                        # print("pred_tmp=",np.unique(pred_tmp),np.unique(mask_tmp))
                        # print("pred_tmp",np.unique(pred_tmp))
                        # print("pred.sum",pred_tmp.sum())
                        # dice_score = metrics.dice_val_ext(pred_tmp==1, mask_tmp==1)
                        # iou_score =  metrics.iou_val_ext(pred_tmp==1,mask_tmp==1)
                        # hd95_score = metrics.hd95_val_ext(pred_tmp==1,mask_tmp==1)

                        # dice_score,iou_score,hd95_score = self.metrics(pred_tmp==1,mask_tmp==1)
                        #
                        # batch_dice_score += dice_score
                        # batch_iou_score += iou_score
                        # batch_hd95_score += hd95_score

                        score_i = []
                        print("num_class",self.args.num_classes)
                        # print("88 --dice={0} iou={1} hd95={2}".format(dice_score,iou_score,hd95_score))
                        # score every foreground class (class 0 = background is skipped)
                        for class_i in range(1,self.args.num_classes):
                            # if (pred_tmp==class_i).sum() == 0 and (mask_tmp==class_i).sum() == 0:
                            #     print("第{0}类 pred mask 都是0".format(class_i))
                            # elif (pred_tmp==class_i).sum() != 0 and (mask_tmp==class_i).sum() == 0:
                            #     print("第{0}类 pred不是0， mask 是0".format(class_i))
                            # elif (pred_tmp==class_i).sum() == 0 and (mask_tmp==class_i).sum() != 0:
                            #     print("第{0}类 pred是0， mask不是0".format(class_i))
                            # else :
                            #     print("第{0}类 pred mask 都不是0".format(class_i))

                            dice_score_i,iou_score_i,hd95_score_i = self.metrics(pred_tmp==class_i,mask_tmp==class_i)
                            print("99 --dice={0} iou={1} hd95={2}".format(dice_score_i,iou_score_i,hd95_score_i))
                            score_i.append([dice_score_i,iou_score_i,hd95_score_i])
                        # [(dice,iou,hd95),(dice,iou,hd95),(dice,iou,hd95)]
                        # batch_score_i += np.array(score_i)
                        score_list += np.array(score_i)
                        number += 1

                    # score_list += np.array(batch_score_i)
                    # pbar.set_postfix(**{f'{self.metric_dice.name}': batch_score / number})
                    if self.args.task_name == "breast_UNet":
                        # single class: [(dice, iou, hd95)]
                        pbar.set_postfix(**{'Dice':score_list[0][0] / number,'IoU':score_list[0][1]/number,'hd95':score_list[0][2]/number})

                        print("\n yy - Dice:{0}  IoU:{1}  hd95:{2} \n".format(score_list[0][0] / number,
                                                                             score_list[0][1] / number,
                                                                             score_list[0][2] / number))

                        # print("\n ext Dice:{0}  IoU:{1}  hd95:{2} \n".format(batch_dice_score/number , batch_iou_score/number ,batch_hd95_score/number))


                    elif self.args.task_name == "ACDC_UNet":
                        # three classes: [(dice, iou, hd95), (dice, iou, hd95), (dice, iou, hd95)]
                        score_mean = score_list / number
                        # NOTE(review): 'Dice'/'IoU'/'hd95' keys repeat in this dict
                        # literal, so later entries overwrite earlier ones -- only the
                        # 'mean' values are actually displayed. Likely unintended.
                        pbar.set_postfix(**{'1':"---",'Dice': score_mean[0][0], 'IoU': score_mean[0][1], 'hd95': score_mean[0][2],
                                            '2':"---",'Dice': score_mean[1][0], 'IoU': score_mean[1][1], 'hd95': score_mean[1][2],
                                            '3':"---",'Dice': score_mean[2][0], 'IoU': score_mean[2][1], 'hd95': score_mean[2][2],
                                            'mean':"---",'Dice': np.mean(score_mean,axis=0)[0], 'IoU': np.mean(score_mean,axis=0)[1], 'hd95': np.mean(score_mean,axis=0)[2]})
                    else :
                        # unknown task name: abort silently
                        exit()
                    pbar.update(1)
        return score_list,number
        # return batch_dice_score / number,batch_iou_score/number
