# import numpy as np
# import os
# import torch
# from tqdm import tqdm
# import torch.nn as nn
# from tensorboardX import SummaryWriter
# from torchvision.utils import make_grid
# from torch.utils.data import DataLoader

# from utils import mk_path, _get_logger, loadyaml,build_lr_scheduler
# from model import build_model
# from datasets import build_loader


# NOTE(review): this entire script is commented out (dead code). All lines below
# are preserved byte-for-byte; only the inline Chinese comments were translated
# to English. Consider deleting the dead code and relying on version control.
#
# main(): loads the YAML config, prepares save/log directories, seeds the RNGs,
# builds the data loaders and model, freezes the first two backbone stages
# (layer0 / layer1), then hands off to the CPMT() training loop.
# def main():

#     path = r"config/CPMT_pascal_split0_resnet50.yaml"
#     root = os.path.dirname(os.path.realpath(__file__))  # absolute path of this file's directory
#     args = loadyaml(os.path.join(root, path))  # load the YAML config
#     if args.cuda:
#         args.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
#     else:
#         args.device = torch.device("cpu")

#     args.save_path = os.path.join(root, args.save_path)
#     mk_path(args.save_path)
#     mk_path(os.path.join(args.save_path, "tensorboardX"))
#     mk_path(os.path.join(args.save_path, "model"))
#     args.supervise_save_path = os.path.join(args.save_path, "model", "supervise_model.pth")

#     args.writer = SummaryWriter(os.path.join(args.save_path, "tensorboardX"))
#     args.logger = _get_logger(os.path.join(args.save_path, "log.log"), "info")
#     args.tqdm = os.path.join(args.save_path, "tqdm.log")
#     torch.manual_seed(args.seed)
#     torch.cuda.manual_seed(args.seed)
#     np.random.seed(args.seed)

#     torch.backends.cudnn.deterministic = False  # single-GPU run; determinism not required
#     torch.backends.cudnn.benchmark = True  # let cuDNN benchmark to pick the fastest kernels

#     train_loader, test_loader = build_loader(args)  # build the datasets / data loaders
#     args.epochs = args.total_itrs // len(train_loader) + 1
#     args.logger.info("==========> train_loader length:{}".format(len(train_loader.dataset)))
#     args.logger.info("==========> test_dataloader length:{}".format(len(test_loader.dataset)))
#     args.logger.info("==========> epochs length:{}".format(args.epochs))
#     args.logger.info("==========> args:{}".format(args))

#     model = build_model(args=args).to(device=args.device)

#     # Freeze the two earliest backbone stages so only deeper layers are trained.
#     for param in model.layer0.parameters():
#         param.requires_grad = False
#     for param in model.layer1.parameters():
#         param.requires_grad = False


#     CPMT(model=model, train_loader=train_loader, test_loader=test_loader, args=args)


# CPMT(): supervised training loop. Iterates epochs over train_loader with SGD,
# keeps BatchNorm layers in eval mode throughout training, evaluates every
# args.step_size iterations, checkpoints on best mIoU, and logs loss/lr/miou
# to TensorBoard. Stops once cur_itrs exceeds args.total_itrs.
# NOTE(review): dead code — preserved byte-for-byte except translated comments.
# NOTE(review): the active optimizer hard-codes lr=0.001/momentum=0.9/
# weight_decay=0.0005 instead of using args.lr etc. (the args-based version
# is itself commented out just above it) — confirm this was intentional.
# NOTE(review): lr_scheduler.step() runs every iteration AND the lr is divided
# by 10 at lr_decay_iters — two decay mechanisms stack; verify intended.
# def CPMT(model: nn.Module,
#         train_loader: DataLoader,
#         test_loader: DataLoader,
#         args):

#     # optimizer = torch.optim.SGD([param for param in model.parameters() if param.requires_grad],
#     #                              lr=args.lr, 
#     #                              momentum=args.momentum, 
#     #                              weight_decay=args.weight_decay)

#     optimizer = torch.optim.SGD([param for param in model.parameters() if param.requires_grad],
#                                  lr=0.001,momentum=0.9,weight_decay=0.0005)

#     lr_scheduler = build_lr_scheduler(args=args, optimizer=optimizer)

#     # Manual step-decay milestones at 1/3 and 2/3 of the iteration budget.
#     total_itrs = args.total_itrs // args.batch_size
#     lr_decay_iters = [total_itrs // 3, total_itrs * 2 // 3]
#     print(lr_decay_iters)

#     max_epoch = args.total_itrs // len(train_loader) + 1
#     args.logger.info("==============> max_epoch :{}".format(max_epoch))

#     # config network and criterion
#     criterion = nn.CrossEntropyLoss(ignore_index=255)
#     best_miou = 0.0
#     cur_itrs = 0
#     train_loss = 0.0

#     pbar = tqdm(total=args.total_itrs)

#     for epoch in range(max_epoch):
#         model.train()

#         # Keep BatchNorm running stats frozen during training.
#         for module in model.modules():
#             if isinstance(module, torch.nn.BatchNorm2d):
#                 module.eval()

#         for img_s_list, mask_s_list, img_q, mask_q, _, _, _ in train_loader:
#             cur_itrs += 1
#             img_q, mask_q = img_q.to(args.device).float(), mask_q.to(args.device).long()

#             for k in range(len(img_s_list)):
#                 img_s_list[k], mask_s_list[k] = img_s_list[k].to(args.device).float(), mask_s_list[k].to(args.device).long()

#             out_ls = model(img_s_list, mask_s_list, img_q, mask_q)

#             # loss = criterion(out_ls[0], mask_q) + criterion(out_ls[1], mask_q) * 0.2
#             loss = criterion(out_ls, mask_q)

#             # mask_s = torch.cat(mask_s_list, dim=0)

#             # loss = criterion(out_ls[0], mask_q) + criterion(out_ls[1], mask_q) + criterion(out_ls[2], mask_s) * 0.2

#             optimizer.zero_grad()
#             loss.backward()
#             optimizer.step()
#             lr_scheduler.step()

#             if cur_itrs in lr_decay_iters:
#                 optimizer.param_groups[0]['lr'] /= 10.0

#             lr = optimizer.param_groups[0]["lr"]

#             train_loss += loss.item()
#             args.writer.add_scalar('SSP/loss', loss.item(), cur_itrs)
#             args.writer.add_scalar('SSP/lr', lr, cur_itrs)

#             if cur_itrs % args.step_size == 0:
#                 # run validation
#                 model.eval()
#                 miou = evaluate(cur_itrs, model, test_loader, args)
#                 args.writer.add_scalar('SSP/miou', miou, cur_itrs)

#                 # Checkpoint whenever validation mIoU improves.
#                 if miou > best_miou:
#                     best_miou = miou
#                     torch.save({
#                         "model": model.state_dict(),
#                         "optimizer": optimizer.state_dict(),
#                         "cur_itrs": cur_itrs,
#                         "best_miou": best_miou
#                     }, args.supervise_save_path)

#                 args.logger.info("miou: {:.4f}, best miou: {:.4f}".format(miou, best_miou))

#                 # Back to training mode, with BatchNorm still frozen.
#                 model.train()

#                 for module in model.modules():
#                     if isinstance(module, torch.nn.BatchNorm2d):
#                         module.eval()

#             if cur_itrs > args.total_itrs:
#                 return

#             pbar.update(1)

#         args.logger.info("Train [{}/{} ({:.0f}%)]\t loss: {:.5f} ".format(cur_itrs, args.total_itrs,
#                                                                           100. * cur_itrs / args.total_itrs,
#                                                                           train_loss/len(train_loader.dataset)
#                                                                           ))
#         train_loss = 0


# Streaming mean-IoU metric accumulated as a running confusion matrix.
# NOTE(review): dead code — the class is commented out; preserved byte-for-byte
# with review comments added.
# class mIOU:
#     def __init__(self, num_classes):
#         self.num_classes = num_classes
#         self.hist = np.zeros((num_classes, num_classes))

#     def _fast_hist(self, label_pred, label_true):
#         # Confusion matrix for one flattened sample; pixels whose true label
#         # is outside [0, num_classes) (e.g. the 255 ignore index) are masked out.
#         mask = (label_true >= 0) & (label_true < self.num_classes)
#         hist = np.bincount(
#             self.num_classes * label_true[mask].astype(int) +
#             label_pred[mask], minlength=self.num_classes ** 2).reshape(self.num_classes, self.num_classes)
#         return hist

#     def add_batch(self, predictions, gts):
#         # Accumulate one batch of (prediction, ground-truth) pairs.
#         for lp, lt in zip(predictions, gts):
#             self.hist += self._fast_hist(lp.flatten(), lt.flatten())

#     def evaluate(self):
#         # Per-class IoU = diag / (row sum + col sum - diag). The mean skips
#         # class 0 (background) via iu[1:] and ignores NaNs (classes never seen).
#         iu = np.diag(self.hist) / (self.hist.sum(axis=1) + self.hist.sum(axis=0) - np.diag(self.hist))
#         return np.nanmean(iu[1:])


# Two-color palette (black = background, white = foreground) used by evaluate()
# to render binary segmentation masks as RGB images for TensorBoard.
# PALETTE = np.array([
#     [0, 0, 0],
#     [255, 255, 255],
# ])


# evaluate(): runs the model over the test loader, accumulates mIoU (21 classes
# for pascal, 81 otherwise), logs one sample visualization to TensorBoard, and
# returns mIoU * 100. Binary predictions/masks are remapped to the episode's
# class id before scoring.
# NOTE(review): dead code — preserved byte-for-byte except translated comments.
# NOTE(review): caller should wrap this in torch.no_grad()/model.eval();
# here only the forward pass itself is under no_grad.
# def evaluate(cur_itrs, model, dataloader, args):
#     tbar = tqdm(dataloader)

#     num_classes = 21 if args.datasets == 'pascal' else 81
#     metric = mIOU(num_classes)

#     for i, (img_s_list, mask_s_list, img_q, mask_q, cls, _, id_q) in enumerate(tbar):
#         img_q, mask_q = img_q.cuda(), mask_q.cuda()
#         for k in range(len(img_s_list)):
#             img_s_list[k], mask_s_list[k] = img_s_list[k].cuda(), mask_s_list[k].cuda()

#         cls = cls[0].item()

#         with torch.no_grad():
#             out_ls = model(img_s_list, mask_s_list, img_q, mask_q)
#             pred = torch.argmax(out_ls, dim=1)

#         # Visualize exactly one batch (the 11th) per evaluation call.
#         if i == 10:
#             # draw images to TensorBoard
#             image = make_grid(img_q.detach(), nrow=1, normalize=True, scale_each=True)
#             args.writer.add_image('SSP_Image/image_q', image.squeeze(), cur_itrs)

#             label_pred = pred.detach().squeeze().cpu().numpy()
#             label_pred = label_pred.astype(np.uint8)
#             img = PALETTE[label_pred]
#             img = img.astype(np.uint8)
#             args.writer.add_image('SSP_Image/label_pred', img, cur_itrs, dataformats='HWC')

#             label_true = mask_q.detach().squeeze().cpu().numpy()
#             label_true = label_true.astype(np.uint8)
#             # Map the 255 ignore index to background so PALETTE indexing is valid.
#             label_true[label_true == 255] = 0
#             img = PALETTE[label_true]
#             img = img.astype(np.uint8)
#             args.writer.add_image('SSP_Image/label_true', img, cur_itrs, dataformats='HWC')

#         # Remap binary foreground (1) to the episode's actual class id.
#         pred[pred == 1] = cls
#         mask_q[mask_q == 1] = cls

#         metric.add_batch(pred.cpu().numpy(), mask_q.cpu().numpy())
#         tbar.set_description("Testing mIOU: %.2f" % (metric.evaluate() * 100.0))

#     return metric.evaluate() * 100.0


# Script entry point (also disabled while the whole file is commented out).
# if __name__ == '__main__':
#     main()
