
# import torch
# import torch.nn as nn
# from einops import rearrange, repeat
# import torch.nn.functional as F
# from torchvision.transforms import transforms


# class Pixel_Class_Contrastive_Loss(nn.Module):
#     def __init__(self, num_classes=4):
#         super(Pixel_Class_Contrastive_Loss, self).__init__()
#         self.num_classes = num_classes

#     def contrastiveLoss(self, pos, neg, temperature=0.1):
#         """
#         :param pos(Tensor): Nx1 positive similarity.
#         :param neg(Tensor): Nxk negative similarity.
#         :return dict[str, Tensor]:  A dictionary of loss components.
#         """
#         criterion = nn.CrossEntropyLoss()
#         N = pos.size(0)
#         logits = torch.cat((pos, neg), dim=1)
#         logits /= temperature
#         labels = torch.zeros((N, ), dtype=torch.long).to(pos.device)
#         losses = criterion(logits, labels)
#         return losses

#     # Define the contrastive-learning loss to separate features class-wise
#     def forward(self, features: torch.Tensor, memory_features, labels, student_predict, teacher_predict):
#         '''
#         features: [b,c,h,w]
#         memory_features: [b,c,h,w]
#         labels: [b,h,w]
#         student_predict: [b,num_class,h,w]
#         teacher_predict: [b,num_class,h,w]
#         return:  returns the contrastive loss between features vectors from [features] and from [memory] in a class-wise fashion.
#         '''
#         loss = 0.0
#         student_predict = torch.argmax(torch.softmax(student_predict, dim=1), dim=1, keepdim=False)  # [b,h,w]
#         teacher_predict = torch.argmax(torch.softmax(teacher_predict, dim=1), dim=1, keepdim=False)  # [b,h,w]

#         mask_prediction_correctly_student = ((student_predict == labels).float() * (student_predict > 0).float()).bool()
#         mask_prediction_correctly_teacher = ((teacher_predict == labels).float() * (teacher_predict > 0).float()).bool()

#         features = features.permute(0, 2, 3, 1)  # [b,c,h,w]->[b,h,w,c]
#         memory_features = memory_features.permute(0, 2, 3, 1)  # [b,c,h,w]->[b,h,w,c]

#         features = features[mask_prediction_correctly_student]  # 选取student预测正确的特征  #[b*h*w,c]
#         memory_features = memory_features[mask_prediction_correctly_teacher]  # 选取teacher预测正确的特征  #[b*h*w,c]

#         mask_label_correctly_student = student_predict[mask_prediction_correctly_student].reshape(-1)  # [b*h*w]
#         mask_label_correctly_teacher = teacher_predict[mask_prediction_correctly_teacher].reshape(-1)  # [b*h*w]

#         length = 1024
#         oppose_length = 1024*self.num_classes
#         for c in range(1, self.num_classes):
#             student_mask_c = mask_label_correctly_student == c
#             teacher_mask_c = mask_label_correctly_teacher == c
#             teacher_oppose_c = mask_label_correctly_teacher != c

#             if teacher_mask_c.sum().data < length or teacher_oppose_c.sum() < length or teacher_oppose_c.sum() < oppose_length:
#                 continue

#             features_c = features[student_mask_c, ...][0:length, ...]
#             memory_c = memory_features[teacher_mask_c, ...][0:length, ...]
#             oppose_c = memory_features[teacher_oppose_c, ...][0:oppose_length, ...]

#             features_c = F.normalize(features_c, dim=1)
#             memory_c = F.normalize(memory_c, dim=1)
#             oppose_c = F.normalize(oppose_c, dim=1).permute(1, 0)

#             l_pos = torch.einsum('nc,nc->n', [features_c, memory_c]).unsqueeze(-1)
#             l_neg = torch.einsum('nc,ck->nk', [features_c, oppose_c])
#             loss += self.contrastiveLoss(l_pos, l_neg, temperature=0.1)

#         return loss


# if __name__ == '__main__':
#     # print(224*224*32/(56*56*8))
#     # features = torch.randn(8, 32, 224, 224).cuda()
#     # memory_features = torch.randn(8, 32, 224, 224).cuda()
#     # labels = torch.randint(low=0, high=4, size=(8, 224, 224)).cuda()

#     # student_predict = torch.randn(8, 4, 224, 224).cuda().cuda()
#     # teacher_predict = torch.randn(8, 4, 224, 224).cuda().cuda()

#     # pixel_loss = Pixel_Contrastive_Loss(num_classes=4).cuda()
#     # loss = pixel_loss(features, memory_features, labels, student_predict, teacher_predict)
#     # print(loss)

#     # x=torch.randn(4,512,1,1)
#     # y=x.expand(-1,-1,60,60)

#     print(y.shape)
import numpy as np
import os
import torch
from tqdm import tqdm
import torch.nn as nn
from tensorboardX import SummaryWriter
from torchvision.utils import make_grid,save_image
from torch.utils.data import DataLoader

from utils import mk_path, _get_logger, loadyaml
from model import build_model
from datasets import build_loader


def main():
    """Entry point: load the YAML config, prepare output directories and
    logging, seed all RNGs, build the data loaders and model, restore the
    supervised checkpoint, then hand control to :func:`SSP`."""
    config_path = r"config/SSP_pascal_split0_resnet50.yaml"
    project_root = os.path.dirname(os.path.realpath(__file__))  # absolute path of this script's directory
    args = loadyaml(os.path.join(project_root, config_path))  # load the YAML config into an args namespace

    # Resolve the compute device; fall back to CPU when CUDA is unavailable
    # or disabled in the config.
    if args.cuda and torch.cuda.is_available():
        args.device = torch.device("cuda")
    else:
        args.device = torch.device("cpu")

    # Create the output tree: <save_path>/{tensorboardX,model}.
    args.save_path = os.path.join(project_root, args.save_path)
    mk_path(args.save_path)
    mk_path(os.path.join(args.save_path, "tensorboardX"))
    mk_path(os.path.join(args.save_path, "model"))
    args.supervise_save_path = os.path.join(args.save_path, "model", "supervise_model.pth")

    args.writer = SummaryWriter(os.path.join(args.save_path, "tensorboardX"))
    args.logger = _get_logger(os.path.join(args.save_path, "log.log"), "info")
    args.tqdm = os.path.join(args.save_path, "tqdm.log")

    # Seed every RNG in play for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)

    torch.backends.cudnn.deterministic = False  # single-GPU run; no distributed setup needed
    torch.backends.cudnn.benchmark = True  # let cudnn search for the fastest kernels

    train_loader, test_loader = build_loader(args)  # build the datasets / loaders
    args.epochs = args.total_itrs // len(train_loader) + 1
    args.logger.info("==========> train_loader length:{}".format(len(train_loader.dataset)))
    args.logger.info("==========> test_dataloader length:{}".format(len(test_loader.dataset)))
    args.logger.info("==========> epochs length:{}".format(args.epochs))

    model = build_model(args=args).to(device=args.device)

    # Restore the supervised-pretraining checkpoint (strict key matching).
    load_msg = model.load_state_dict(torch.load(args.supervise_save_path)["model"], strict=True)
    print(load_msg)

    # (Optional freezing of early layers / BatchNorm — currently disabled.)
    # for param in model.layer0.parameters():
    #     param.requires_grad = False
    # for param in model.layer1.parameters():
    #     param.requires_grad = False

    # for module in model.modules():
    #     if isinstance(module, torch.nn.BatchNorm2d):
    #         for param in module.parameters():
    #             param.requires_grad = False

    SSP(model=model, train_loader=train_loader, test_loader=test_loader, args=args)


def SSP(model: nn.Module,
        train_loader: DataLoader,
        test_loader: DataLoader,
        args):
    """Self-Support Prototype (SSP) driver.

    As written, this only runs a single evaluation pass over *test_loader*
    (the training loop below is commented out).  The optimizer, criterion,
    lr-decay schedule and bookkeeping variables are set up solely for that
    disabled loop.  NOTE(review): the computed `miou` is not returned or
    logged here — confirm whether that is intentional.
    """
    optimizer = torch.optim.SGD([param for param in model.parameters() if param.requires_grad],
                                 lr=args.lr, 
                                 momentum=args.momentum, 
                                 weight_decay=args.weight_decay)

    # Step-decay milestones at 1/3 and 2/3 of the total iteration budget.
    # NOTE(review): total_itrs is divided by batch_size here, but the
    # (disabled) loop compares against per-batch cur_itrs — verify units.
    total_itrs = args.total_itrs // args.batch_size
    lr_decay_iters = [total_itrs // 3, total_itrs * 2 // 3]
    print(lr_decay_iters)

    max_epoch = args.total_itrs // len(train_loader) + 1
    args.logger.info("==============> max_epoch :{}".format(max_epoch))
    model.eval()
    
    # config network and criterion (255 is the ignore label in the masks)
    criterion = nn.CrossEntropyLoss(ignore_index=255)
    best_miou = 0.0
    cur_itrs = 0
    train_loss = 0.0
    miou = evaluate(cur_itrs, model, test_loader, args)

    # pbar = tqdm(total=args.total_itrs)

    # for epoch in range(max_epoch):
    #     model.train()

    #     for module in model.modules():
    #         if isinstance(module, torch.nn.BatchNorm2d):
    #             module.eval()

    #     for img_s_list, mask_s_list, img_q, mask_q, _, _, _ in train_loader:
    #         cur_itrs += 1
    #         img_q, mask_q = img_q.to(args.device).float(), mask_q.to(args.device).long()

    #         for k in range(len(img_s_list)):
    #             img_s_list[k], mask_s_list[k] = img_s_list[k].to(args.device).float(), mask_s_list[k].to(args.device).long()

    #         out_ls = model(img_s_list, mask_s_list, img_q, mask_q)
    #         mask_s = torch.cat(mask_s_list, dim=0)

    #         if args.refine:
    #             loss = criterion(out_ls[0], mask_q) + criterion(out_ls[1], mask_q) + criterion(out_ls[2], mask_q) + criterion(out_ls[3], mask_s) * 0.2
    #         else:
    #             loss = criterion(out_ls[0], mask_q) + criterion(out_ls[1], mask_q) + criterion(out_ls[2], mask_s) * 0.2

    #         optimizer.zero_grad()
    #         loss.backward()
    #         optimizer.step()

    #         if cur_itrs in lr_decay_iters:
    #             optimizer.param_groups[0]['lr'] /= 10.0

    #         lr = optimizer.param_groups[0]["lr"]

    #         train_loss += loss.item()
    #         args.writer.add_scalar('SSP/loss', loss.item(), cur_itrs)
    #         args.writer.add_scalar('SSP/lr', lr, cur_itrs)

    #         if cur_itrs % args.step_size == 0:
    #             #  run validation
    #             model.eval()

    #             #          split0 split1 split2 split3 Mean
    #             # 1-shot : 61.4   67.2   65.4   49.7   60.9
    #             # 5-shot : 68.0   72.0   74.8   60.2   68.8
                

                
    #             args.writer.add_scalar('SSP/miou', miou, cur_itrs)

    #             if miou > best_miou:
    #                 best_miou = miou
    #                 torch.save({
    #                     "model": model.state_dict(),
    #                     "optimizer": optimizer.state_dict(),
    #                     "cur_itrs": cur_itrs,
    #                     "best_miou": best_miou
    #                 }, args.supervise_save_path)
                
    #             args.logger.info("miou: {:.4f}, best miou: {:.4f}".format(miou, best_miou))
                
    #             model.train()

    #             for module in model.modules():
    #                 if isinstance(module, torch.nn.BatchNorm2d):
    #                     module.eval()

    #         if cur_itrs > args.total_itrs:
    #             return

    #         pbar.update(1)

    #     args.logger.info("Train [{}/{} ({:.0f}%)]\t loss: {:.5f} ".format(cur_itrs, args.total_itrs,
    #                                                                       100. * cur_itrs / args.total_itrs,
    #                                                                       train_loss/len(train_loader.dataset)
    #                                                                       ))
    #     train_loss = 0


class mIOU:
    def __init__(self, num_classes):
        self.num_classes = num_classes
        self.hist = np.zeros((num_classes, num_classes))

    def _fast_hist(self, label_pred, label_true):
        mask = (label_true >= 0) & (label_true < self.num_classes)
        hist = np.bincount(
            self.num_classes * label_true[mask].astype(int) +
            label_pred[mask], minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
        return hist

    def add_batch(self, predictions, gts):
        for lp, lt in zip(predictions, gts):
            self.hist += self._fast_hist(lp.flatten(), lt.flatten())

    def evaluate(self):
        iu = np.diag(self.hist) / (self.hist.sum(axis=1) + self.hist.sum(axis=0) - np.diag(self.hist))
        return np.nanmean(iu[1:])


# Binary RGB palette used to render label maps as images:
# index 0 (background) -> black, index 1 (foreground) -> white.
PALETTE = np.array([
    [0, 0, 0],
    [255, 255, 255],
])


def evaluate(cur_itrs, model, dataloader, args):
    """Run few-shot segmentation evaluation over *dataloader*.

    For each episode: forward the support/query pair through *model*, dump
    the query image plus predicted and ground-truth masks as PNGs (debug
    output to hard-coded paths), remap the binary foreground label to the
    episode's class id, and accumulate a multi-class confusion histogram.

    :param cur_itrs: current training iteration (kept for the tensorboard
        logging calls below, currently disabled).
    :param model: segmentation model; called as
        ``model(img_s_list, mask_s_list, img_q, mask_q)``.
    :param dataloader: yields
        ``(img_s_list, mask_s_list, img_q, mask_q, cls, _, id_q)`` episodes.
    :param args: config namespace; only ``args.datasets`` is read here.
    :return: mean IoU over foreground classes, in percent.
    """
    from PIL import Image  # hoisted out of the loop; only needed for debug mask dumps

    tbar = tqdm(dataloader)

    num_classes = 21 if args.datasets == 'pascal' else 81
    metric = mIOU(num_classes)

    for i, (img_s_list, mask_s_list, img_q, mask_q, cls, _, id_q) in enumerate(tbar):
        img_q, mask_q = img_q.cuda(), mask_q.cuda()
        for k in range(len(img_s_list)):
            img_s_list[k], mask_s_list[k] = img_s_list[k].cuda(), mask_s_list[k].cuda()

        cls = cls[0].item()  # the episode's foreground class id

        with torch.no_grad():
            out_ls = model(img_s_list, mask_s_list, img_q, mask_q)
            pred = torch.argmax(out_ls[0], dim=1)

        # --- debug dumps; NOTE(review): output directory is hard-coded ---
        save_image(img_q.detach(), r"/home/ubuntu/code/pytorch_code/test/image/{}_image.png".format(i), nrow=1, normalize=True, scale_each=True)

        label_pred = pred.detach().squeeze().cpu().numpy().astype(np.uint8)
        pred_png = PALETTE[label_pred].astype(np.uint8)
        Image.fromarray(pred_png).save(r"/home/ubuntu/code/pytorch_code/test/image/{}_label_pred.png".format(i))

        label_true = mask_q.detach().squeeze().cpu().numpy().astype(np.uint8)
        label_true[label_true == 255] = 0  # render the ignore label as background
        true_png = PALETTE[label_true].astype(np.uint8)
        Image.fromarray(true_png).save(r"/home/ubuntu/code/pytorch_code/test/image/{}_label_true.png".format(i))

        # Map the binary foreground prediction/mask back to the episode's
        # class id so the multi-class histogram credits the right class.
        pred[pred == 1] = cls
        mask_q[mask_q == 1] = cls

        # Re-enabled: with these commented out the histogram stayed empty
        # and the function returned NaN, which SSP() consumed as the mIoU.
        metric.add_batch(pred.cpu().numpy(), mask_q.cpu().numpy())
        tbar.set_description("Testing mIOU: %.2f" % (metric.evaluate() * 100.0))

    return metric.evaluate() * 100.0


# Script entry point: run the full config-load / train-eval pipeline.
if __name__ == '__main__':
    main()
