import numpy as np
import os
import torch
from tqdm import tqdm
import torch.nn as nn
from tensorboardX import SummaryWriter
from torchvision.utils import make_grid
from torch.utils.data import DataLoader
import torch.nn.functional as F
from utils import mk_path, _get_logger, loadyaml, build_lr_scheduler
from model import build_model
from datasets import build_loader


def main():
    """Entry point: load the YAML config, seed all RNGs, build the data
    loaders and model, freeze the first two backbone stages, and launch
    CPMT training.

    NOTE(review): assumes the YAML provides attributes such as ``cuda``,
    ``seed``, ``save_path``, ``total_itrs`` -- verify against the config file.
    """

    path = r"config/CPMT_pascal_split0_resnet50.yaml"
    root = os.path.dirname(os.path.realpath(__file__))  # absolute path of this file's directory
    args = loadyaml(os.path.join(root, path))  # load the YAML config into an args namespace
    if args.cuda:
        args.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    else:
        args.device = torch.device("cpu")

    # Create the output directory tree for logs, TensorBoard and checkpoints.
    args.save_path = os.path.join(root, args.save_path)
    mk_path(args.save_path)
    mk_path(os.path.join(args.save_path, "tensorboardX"))
    mk_path(os.path.join(args.save_path, "model"))
    args.supervise_save_path = os.path.join(args.save_path, "model", "supervise_model.pth")

    args.writer = SummaryWriter(os.path.join(args.save_path, "tensorboardX"))
    args.logger = _get_logger(os.path.join(args.save_path, "log.log"), "info")
    args.tqdm = os.path.join(args.save_path, "tqdm.log")
    # Seed every RNG source for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    torch.backends.cudnn.deterministic = False  # single-GPU run; deterministic kernels not required
    torch.backends.cudnn.benchmark = True  # let cuDNN search for the fastest algorithms

    train_loader, test_loader = build_loader(args)  # build train/test datasets and loaders
    args.epochs = args.total_itrs // len(train_loader) + 1
    args.logger.info("==========> train_loader length:{}".format(len(train_loader.dataset)))
    args.logger.info("==========> test_dataloader length:{}".format(len(test_loader.dataset)))
    args.logger.info("==========> epochs length:{}".format(args.epochs))
    args.logger.info("==========> args:{}".format(args))

    model = build_model(args=args).to(device=args.device)

    # Freeze the first two backbone stages so they are not updated during training.
    for param in model.layer0.parameters():
        param.requires_grad = False
    for param in model.layer1.parameters():
        param.requires_grad = False

    CPMT(model=model, train_loader=train_loader, test_loader=test_loader, args=args)


# class Pixel_Class_Contrastive_Loss(nn.Module):
#     def __init__(self):
#         super(Pixel_Class_Contrastive_Loss, self).__init__()

#     def contrastiveLoss(self, pos, neg, temperature=0.1):
#         """
#         :param pos(Tensor): Nx1 positive similarity.
#         :param neg(Tensor): Nxk negative similarity.
#         :return dict[str, Tensor]:  A dictionary of loss components.
#         """
#         criterion = nn.CrossEntropyLoss()
#         N = pos.size(0)
#         logits = torch.cat((pos, neg), dim=1)
#         logits /= temperature
#         labels = torch.zeros((N, ), dtype=torch.long).to(pos.device)
#         losses = criterion(logits, labels)
#         return losses

#     # Contrastive loss that pushes apart foreground and background features (intra-class separation)
#     def forward(self, support_FP, support_BP, query_FP, query_BP):

#         support_FP = F.normalize(support_FP, dim=1)
#         support_BP = F.normalize(support_BP, dim=1)
#         query_FP = F.normalize(query_FP, dim=1)
#         query_BP = F.normalize(query_BP, dim=1)


#         l_pos_1= F.cosine_similarity(support_FP, query_FP.detach(), dim=-1).unsqueeze(1)
#         l_neg_1= F.cosine_similarity(support_FP, support_BP.detach(), dim=-1).unsqueeze(1)
        
#         loss_1 = self.contrastiveLoss(l_pos_1, l_neg_1, temperature=0.1)

#         l_pos_2= F.cosine_similarity(support_BP, query_BP.detach(), dim=-1).unsqueeze(1)
#         l_neg_2= F.cosine_similarity(support_BP, query_FP.detach(), dim=-1).unsqueeze(1)

#         loss_2 = self.contrastiveLoss(l_pos_2, l_neg_2, temperature=0.1)

#         # l_pos_1 = torch.einsum('nc,nc->n', [support_FP, query_FP]).unsqueeze(-1)
#         # l_neg_1 = torch.einsum('nc,ck->nk', [support_FP, query_BP.permute(1, 0)])
#         # loss_1 = self.contrastiveLoss(l_pos_1, l_neg_1, temperature=0.1)

#         # l_pos_2 = torch.einsum('nc,nc->n', [support_BP, query_BP]).unsqueeze(-1)
#         # l_neg_2 = torch.einsum('nc,ck->nk', [support_BP, query_FP.permute(1, 0)])
#         # loss_2 = self.contrastiveLoss(l_pos_2, l_neg_2, temperature=0.1)

#         loss= (loss_1 + loss_2).mean()
#         return loss


def get_current_consistency_weight(epoch, args):
    """Return the consistency weight for this epoch.

    Ramp-up schedule from https://arxiv.org/abs/1610.02242: the configured
    ``args.consistency`` scaled by a sigmoid ramp over
    ``args.consistency_rampup`` epochs.
    """
    ramp = sigmoid_rampup(epoch, args.consistency_rampup)
    return args.consistency * ramp


def sigmoid_rampup(current, rampup_length):
    """Exponential sigmoid ramp-up from https://arxiv.org/abs/1610.02242.

    Returns exp(-5 * (1 - t)^2) where t = clip(current, 0, rampup_length) /
    rampup_length, so the value ramps from exp(-5) at 0 up to 1.0.  A zero
    ``rampup_length`` disables the ramp and returns 1.0 immediately.
    """
    if rampup_length == 0:
        return 1.0
    clipped = np.clip(current, 0.0, rampup_length)
    phase = 1.0 - clipped / rampup_length
    return float(np.exp(-5.0 * phase * phase))


def weighted_dice_loss(
    prediction,
    target_seg,
    weighted_val: float = 1.0,
    reduction: str = "sum",
    eps: float = 1e-8,
):
    """
    Weighted version of Dice Loss.

    Args:
        prediction: predicted soft masks; must contain n*h*w elements so it
            can be flattened to one (h*w)-vector per sample.
        target_seg: segmentation target of shape (n, h, w).
        weighted_val: scalar weight applied to every per-sample loss.
        reduction: 'none' | 'mean' | 'sum'
                   'none': no reduction is applied to the output.
                   'mean': the per-sample losses are averaged.
                   'sum' : the per-sample losses are summed, then divided by n.
        eps: lower clamp on the denominator to avoid division by zero.
    """
    target_seg = target_seg.unsqueeze(1).float()
    n, _, h, w = target_seg.shape

    # Flatten each sample to a single (h*w)-vector.  (The original did this
    # with two consecutive reshapes; a single reshape is equivalent.)
    prediction = prediction.reshape(-1, h * w)
    target_seg = target_seg.reshape(-1, h * w)

    # Per-sample soft Dice loss: 1 - 2*|P.T| / (|P|^2 + |T|^2).
    loss_part = (prediction ** 2).sum(dim=-1) + (target_seg ** 2).sum(dim=-1)
    loss = 1 - 2 * (target_seg * prediction).sum(dim=-1) / torch.clamp(loss_part, min=eps)
    # normalize the loss
    loss = loss * weighted_val

    if reduction == "sum":
        loss = loss.sum()/n
    elif reduction == "mean":
        loss = loss.mean()
    return loss

class WeightedDiceLoss(nn.Module):
    """Module wrapper around :func:`weighted_dice_loss` that fixes the
    loss weight and reduction mode at construction time."""

    def __init__(self, weighted_val: float = 1.0, reduction: str = "sum"):
        super(WeightedDiceLoss, self).__init__()
        self.weighted_val = weighted_val
        self.reduction = reduction

    def forward(self, prediction, target_seg):
        """Compute the weighted Dice loss between prediction and target."""
        return weighted_dice_loss(prediction, target_seg,
                                  self.weighted_val, self.reduction)
    
    
def CPMT(model: nn.Module,
         train_loader: DataLoader,
         test_loader: DataLoader,
         args):
    """Train the few-shot segmentation model for ``args.total_itrs`` SGD steps.

    Every ``args.step_size`` iterations the model is evaluated on
    ``test_loader`` and the checkpoint with the best mIoU is saved to
    ``args.supervise_save_path``.  The training loss is cross-entropy on both
    query output heads plus a 0.2-weighted cross-entropy on the support
    outputs.
    """

    # Only optimize parameters left trainable (the caller may have frozen
    # early backbone layers).
    optimizer = torch.optim.SGD([param for param in model.parameters() if param.requires_grad],
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Iterations at which the learning rate is divided by 10.
    lr_decay_iters = [5000,10000]

    max_epoch = args.total_itrs // len(train_loader) + 1
    args.logger.info("==============> max_epoch :{}".format(max_epoch))

    # config network and criterion; 255 is the ignore label in the masks
    criterion = nn.CrossEntropyLoss(ignore_index=255)
    best_miou = 0.0
    cur_itrs = 0
    train_loss = 0.0

    pbar = tqdm(total=args.total_itrs)

    for epoch in range(max_epoch):
        model.train()

        for img_s_list, mask_s_list, img_q, mask_q, _, _, _ in train_loader:
            cur_itrs += 1
            img_q, mask_q = img_q.to(args.device).float(), mask_q.to(args.device).long()

            # Move every support image/mask of the episode to the device.
            for k in range(len(img_s_list)):
                img_s_list[k], mask_s_list[k] = img_s_list[k].to(args.device).float(), mask_s_list[k].to(args.device).long()

            # out_0/out_1: query predictions; out_ls: per-shot support predictions
            # (presumably -- TODO confirm against model.forward).
            out_0, out_ls,out_1 = model(img_s_list, mask_s_list, img_q, mask_q)

            mask_s = torch.cat(mask_s_list, dim=0)
            out_support = torch.cat(out_ls, dim=0)

            # consistency_weight = get_current_consistency_weight(epoch=cur_itrs // 150, args=args)

            # Two query losses plus a down-weighted support loss.
            loss = criterion(out_0, mask_q) + 0.2 * criterion(out_support, mask_s) + criterion(out_1,mask_q)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # model.ema(0.999, cur_itrs)
            # lr_scheduler.step()

            # Step-wise LR decay.  NOTE(review): only param_groups[0] is
            # touched -- fine here because a single group is created above.
            if cur_itrs in lr_decay_iters:
                optimizer.param_groups[0]['lr'] /= 10.0

            lr = optimizer.param_groups[0]["lr"]

            train_loss += loss.item()
            args.writer.add_scalar('SSP/loss', loss.item(), cur_itrs)
            args.writer.add_scalar('SSP/lr', lr, cur_itrs)

            if cur_itrs % args.step_size == 0:
                # run validation and checkpoint the best model
                model.eval()
                miou = evaluate(cur_itrs, model, test_loader, args)
                args.writer.add_scalar('SSP/miou', miou, cur_itrs)

                if miou > best_miou:
                    best_miou = miou
                    torch.save({
                        "model": model.state_dict(),
                        "optimizer": optimizer.state_dict(),
                        "cur_itrs": cur_itrs,
                        "best_miou": best_miou
                    }, args.supervise_save_path)

                args.logger.info("miou: {:.4f}, best miou: {:.4f}".format(miou, best_miou))

                model.train()

                # for module in model.modules():
                #     if isinstance(module, torch.nn.BatchNorm2d):
                #         module.eval()

            # NOTE(review): pbar is never closed before this return --
            # harmless, but the bar may render incompletely.
            if cur_itrs > args.total_itrs:
                return

            pbar.update(1)

        args.logger.info("Train [{}/{} ({:.0f}%)]\t loss: {:.5f} ".format(cur_itrs, args.total_itrs,
                                                                          100. * cur_itrs / args.total_itrs,
                                                                          train_loss/len(train_loader.dataset)
                                                                          ))
        train_loss = 0


class mIOU:
    """Accumulate a confusion matrix over batches and report mean IoU.

    The mean is taken over classes 1..num_classes-1, i.e. class 0
    (background) is excluded from the reported score.
    """

    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.hist = np.zeros((num_classes, num_classes))

    def _fast_hist(self, label_pred, label_true):
        # Keep only pixels whose ground-truth label is a valid class id.
        valid = (label_true >= 0) & (label_true < self.num_classes)
        # Encode each (true, pred) pair as a single index, then count.
        combined = self.num_classes * label_true[valid].astype(int) + label_pred[valid]
        counts = np.bincount(combined, minlength=self.num_classes ** 2)
        return counts.reshape(self.num_classes, self.num_classes)

    def add_batch(self, predictions, gts):
        for pred, gt in zip(predictions, gts):
            self.hist += self._fast_hist(pred.flatten(), gt.flatten())

    def evaluate(self):
        # IoU per class = TP / (TP + FP + FN); NaN for classes never seen.
        tp = np.diag(self.hist)
        union = self.hist.sum(axis=1) + self.hist.sum(axis=0) - tp
        return np.nanmean((tp / union)[1:])


# RGB display palette used by evaluate() to render binary masks:
# index 0 -> black (background), index 1 -> white (foreground).
PALETTE = np.array([
    [0, 0, 0],
    [255, 255, 255],
])


def evaluate(cur_itrs, model, dataloader, args):
    """Run episodic evaluation on ``dataloader`` and return the mIoU (in %)
    of the model's first output head.

    Both output heads are scored (``metric`` / ``metric1``); the episode at
    index 10 is rendered to TensorBoard for visual inspection.  The caller is
    responsible for putting ``model`` in eval mode.

    NOTE(review): tensors are moved with ``.cuda()`` rather than
    ``args.device`` -- evaluation assumes a CUDA device is available.
    """
    tbar = tqdm(dataloader)

    # 20 foreground classes + background for PASCAL, 80 + background otherwise.
    num_classes = 21 if args.datasets == 'pascal' else 81
    metric = mIOU(num_classes)
    metric1 = mIOU(num_classes)

    for i, (img_s_list, mask_s_list, img_q, mask_q, cls, _, id_q) in enumerate(tbar):
        img_q, mask_q = img_q.cuda(), mask_q.cuda()
        for k in range(len(img_s_list)):
            img_s_list[k], mask_s_list[k] = img_s_list[k].cuda(), mask_s_list[k].cuda()

        cls = cls[0].item()

        with torch.no_grad():
            out_0, out_1 = model(img_s_list, mask_s_list, img_q, mask_q)
            pred = torch.argmax(out_0, dim=1)
            pred1 = torch.argmax(out_1, dim=1)

        if i == 10:
            # Log the query image, prediction and ground truth to TensorBoard.
            image = make_grid(img_q.detach(), nrow=1, normalize=True, scale_each=True)
            args.writer.add_image('SSP_Image/image_q', image.squeeze(), cur_itrs)

            label_pred = pred.detach().squeeze().cpu().numpy()
            label_pred = label_pred.astype(np.uint8)
            img = PALETTE[label_pred]
            img = img.astype(np.uint8)
            args.writer.add_image('SSP_Image/label_pred', img, cur_itrs, dataformats='HWC')

            label_true = mask_q.detach().squeeze().cpu().numpy()
            label_true = label_true.astype(np.uint8)
            label_true[label_true == 255] = 0  # render ignore pixels as background
            img = PALETTE[label_true]
            img = img.astype(np.uint8)
            args.writer.add_image('SSP_Image/label_true', img, cur_itrs, dataformats='HWC')

        # Remap the binary foreground label to the episode's class id so the
        # multi-class confusion matrix accumulates per-class IoU.
        pred[pred == 1] = cls
        pred1[pred1 == 1] = cls
        mask_q[mask_q == 1] = cls

        metric.add_batch(pred.cpu().numpy(), mask_q.cpu().numpy())
        metric1.add_batch(pred1.cpu().numpy(), mask_q.cpu().numpy())
        tbar.set_description("Testing mIOU: {:.2f}".format(metric.evaluate() * 100.0))

    # Compute the final scores once; the original recomputed the confusion
    # matrix reduction five times here.
    miou = metric.evaluate() * 100.0
    miou1 = metric1.evaluate() * 100.0

    args.logger.info("Testing mIOU: {:.2f} ,{:.2f}".format(miou, miou1))

    args.writer.add_scalar('SSP/miou1', miou, cur_itrs)
    args.writer.add_scalar('SSP/miou2', miou1, cur_itrs)

    return miou


if __name__ == '__main__':
    main()
