# -*- coding: utf-8 -*-
"""
@Time ： 2023/2/24 10:32
@Auth ： GaoShuai
@File ：DeeplesionInDuDoNetTrainer.py
@IDE ：PyCharm
"""

from __future__ import print_function

import argparse
import os
import sys
import time

import cv2

# Allow duplicate OpenMP runtimes to coexist (common MKL/libiomp clash on
# Windows when torch and cv2 are imported together); without this the
# process can abort at import time.
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Make the project root importable when this file is run directly as a script
# (two levels up from this file's directory).
package_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(package_path)
print(sys.path)

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim as optim
from loguru import logger
from tensorboardX import SummaryWriter
from root_config import RootConfig

from network.InDuDoNetDeeplesion import InDuDoNetDeeplesion
from network.build_gemotry import generate_deeplesion_odl_operator
from utils.CommonUtils import check_create_dir, get_current_timestamp
from utils.Visualizer import Visualizer
from utils.config import Config
from utils.dataloader import DeeplesionInDuDoNetMultiDataLoader, get_temp_dataloader

# Pin the visible GPU before any CUDA context is created.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

# Command-line options for the InDuDoNet DeepLesion training run.
parser = argparse.ArgumentParser()
parser.add_argument("--data_path", type=str, default=r"D:\gs\code\InDuDoNet\deeplesion\train",
                    help='txt path to training spa-data')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=0)
parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
parser.add_argument('--patchSize', type=int, default=364, help='the height / width of the input image to network')
parser.add_argument('--niter', type=int, default=100, help='total number of training epochs')
parser.add_argument('--batchnum', type=int, default=1000,
                    help='batchsize*batchnum=1000 for randomly selecting 1000 imag pairs at every iteration')
parser.add_argument('--num_channel', type=int, default=32,
                    help='the number of dual channels')  # refer to https://github.com/hongwang01/RCDNet for the channel concatenation strategy
parser.add_argument('--T', type=int, default=4, help='the number of ResBlocks in every ProxNet')
parser.add_argument('--S', type=int, default=10, help='the number of total iterative stages')
parser.add_argument('--resume', type=int, default=1, help='continue to train')
# nargs='+' so a CLI-supplied value parses to a list of ints, matching the
# list default that MultiStepLR(milestones=...) expects; with type=int alone
# a CLI value would parse to a bare int and break the scheduler.
parser.add_argument("--milestone", type=int, nargs='+', default=[2, 3, 4], help="When to decay learning rate")
parser.add_argument('--lr', type=float, default=0.0001, help='initial learning rate')
parser.add_argument('--log_dir', default='logs', help='tensorboard logs')
parser.add_argument('--model_dir', default='../models/', help='saving model')
parser.add_argument('--eta1', type=float, default=1, help='initialization for stepsize eta1')
parser.add_argument('--eta2', type=float, default=5, help='initialization for stepsize eta2')
parser.add_argument('--alpha', type=float, default=0.5, help='initialization for weight factor')
parser.add_argument('--gamma', type=float, default=1e-1, help='hyper-parameter for balancing different loss items')
opt = parser.parse_args()

# Create the checkpoint directory. exist_ok=True replaces the old blanket
# try/except OSError, which silently swallowed real failures (e.g. permission
# errors) in addition to the intended "already exists" case.
os.makedirs(opt.model_dir, exist_ok=True)

# Let cuDNN auto-tune convolution algorithms for the (fixed) input sizes.
cudnn.benchmark = True


def train_model(net, optimizer, scheduler, data_loader, test_dataloader, config):
    """Train InDuDoNet on DeepLesion and dump qualitative test previews per epoch.

    Args:
        net: InDuDoNetDeeplesion model, already moved to the GPU.
        optimizer: optimizer over ``net.parameters()``.
        scheduler: learning-rate scheduler, stepped once per epoch.
        data_loader: training loader yielding
            (Xgt, Xli, Xma, mask, Sgt, Sma, Sli, Smetal) tensor batches.
        test_dataloader: small fixed loader used for per-epoch visual checks.
        config: project Config object (paths, display/save intervals, epoch counts).

    Side effects: writes TensorBoard scalars, periodic checkpoints
    ("net_epoch_*.pt" and "net_latest.pt") and PNG preview images.
    """
    check_create_dir(os.path.join(RootConfig.project_root_path, config['model_dir'], config.name))
    tmpLogDir = os.path.join(RootConfig.project_root_path, config['log_dir'],
                             config.project_name + config.version + "_" + get_current_timestamp())
    check_create_dir(tmpLogDir)
    writer = SummaryWriter(tmpLogDir)
    step = 0
    visualizer = Visualizer(config, len(data_loader))
    visualizer2 = Visualizer(config, len(data_loader))  # this instance saves the image-domain previews
    for epoch in range(opt.resume, opt.niter):
        mse_per_epoch = 0
        tic = time.time()
        # train stage
        lr = optimizer.param_groups[0]['lr']
        ii = 0
        for ii, data in enumerate(data_loader):

            Xgt, Xli, Xma, mask, Sgt, Sma, Sli, Smetal = [x.cuda() for x in data]
            net.train()
            optimizer.zero_grad()
            # The network returns per-stage reconstructions; the last element of
            # each list is the final stage output.
            # Inputs: Xma, Xli, mask, Sma, Sli, Tr
            ListX, ListS, ListYS = net(Xma, Xli, mask, Sma, Sli, Smetal)  # class InDuDoNet

            # L1 losses on the final stage, plus lightly weighted (0.1x)
            # supervision on the second-to-last stage. Image-domain losses are
            # evaluated outside the metal mask only (mask == 1 marks metal).
            loss_l2YSmid = 0.1 * F.l1_loss(ListYS[opt.S - 2], Sgt)
            loss_l2Xmid = 0.1 * F.l1_loss(ListX[opt.S - 2] * (1 - mask), Xgt * (1 - mask))
            loss_l2YSf = F.l1_loss(ListYS[-1], Sgt)
            loss_l2Xf = F.l1_loss(ListX[-1] * (1 - mask), Xgt * (1 - mask))
            loss_l2YS = loss_l2YSf + loss_l2YSmid
            loss_l2X = loss_l2Xf + loss_l2Xmid
            # gamma balances the sinogram-domain term against the image-domain term.
            loss = opt.gamma * loss_l2YS + loss_l2X
            loss.backward()
            optimizer.step()
            mse_iter = loss.item()
            mse_per_epoch += mse_iter
            if ii % 5 == 0:
                template = '[Epoch:{:>2d}/{:<2d}] {:0>5d}/{:0>5d}, Loss={:5.2e},  Lossl2YS={:5.2e}, Lossl2X={:5.2e}, lr={:.2e}'
                logger.info(
                    template.format(epoch, config["n_epochs"], ii, len(data_loader), mse_iter, loss_l2YS, loss_l2X, lr))
                writer.add_scalar('Loss', loss.item(), step)
                writer.add_scalar('Loss_YS', loss_l2YS.item(), step)
                writer.add_scalar('Loss_X', loss_l2X.item(), step)
            if config['if_show'] and ii % config['show_interval'] == 0:
                # Sinogram-domain previews, clipped to the displayable [0, 255] range.
                show_batch = dict()
                show_batch['Sma_show'] = Sma[0][0].clone().detach().cpu().unsqueeze(0).numpy()
                show_batch['SLI_show'] = Sli[0][0].clone().detach().cpu().unsqueeze(0).numpy()
                S_show_tmp = ListYS[-1][0][0].clone().detach().cpu().unsqueeze(0).numpy()
                S_show_tmp = np.clip(S_show_tmp, 0, 255)
                show_batch['S_show'] = S_show_tmp
                print(np.max(show_batch['S_show']), np.min(show_batch['S_show']))
                show_batch['Sgt'] = Sgt[0][0].clone().detach().cpu().unsqueeze(0).numpy()
                visualizer.reset()
                visualizer.display_current_results(show_batch, epoch, ii, True)

                # Image-domain previews.
                show_batch_2 = dict()
                show_batch_2['Xma_show'] = Xma[0][0].clone().detach().cpu().unsqueeze(0).numpy()
                show_batch_2['XLI_show'] = Xli[0][0].clone().detach().cpu().unsqueeze(0).numpy()
                show_batch_2['X_show'] = np.clip(ListX[-1][0][0].clone().detach().cpu().unsqueeze(0).numpy(), 0, 255)
                print(np.max(show_batch_2['X_show']), np.min(show_batch_2['X_show']))
                show_batch_2['Xgt'] = Xgt[0][0].clone().detach().cpu().unsqueeze(0).numpy()
                visualizer2.reset()
                visualizer2.display_current_results(show_batch_2, epoch, ii, True)

            if ii % config.save_model_interval == 0:
                torch.save(net.state_dict(),
                           os.path.join(RootConfig.project_root_path, config['model_dir'], config.name,
                                        "net_epoch_{:0>2d}_index_{:0>6d}.pt".format(epoch, ii)))
                torch.save(net.state_dict(),
                           os.path.join(RootConfig.project_root_path, config['model_dir'], config.name,
                                        'net_latest.pt'))

            step += 1
        mse_per_epoch /= (ii + 1)
        logger.info('Loss={:+.2e}'.format(mse_per_epoch))
        logger.info('-' * 100)
        scheduler.step()

        toc = time.time()
        logger.info('This epoch take time {:.2f} s'.format(toc - tic))

        logger.info("start to test the model...")
        # Switch any norm/dropout layers to inference behavior; the next train
        # iteration calls net.train() again, so training is unaffected.
        net.eval()
        with torch.no_grad():
            test_save_root_path = os.path.join(RootConfig.project_root_path, "paper_smooth_debug_L1Loss")
            # cv2.imwrite fails silently (returns False) if the target
            # directory does not exist, so create it up front.
            check_create_dir(test_save_root_path)
            # NOTE(review): filenames are keyed by epoch only, so each batch of
            # test_dataloader overwrites the previous one — presumably the
            # loader yields a single batch of at least 2 samples; confirm.
            for test_data in test_dataloader:
                Xgt, Xli, Xma, mask, Sgt, Sma, Sli, Smetal = [x.cuda() for x in test_data]
                ListX, ListS, ListYS = net(Xma, Xli, mask, Sma, Sli, Smetal)
                S_show = ListYS[-1][0][0].clone().detach().cpu().numpy()
                S_show = np.clip(S_show, 0, 255).astype(np.uint8)
                X_show = np.clip(ListX[-1][0][0].clone().detach().cpu().numpy(), 0, 255).astype(np.uint8)
                cv2.imwrite(os.path.join(test_save_root_path, str(epoch) + "_0_S.png"), S_show)
                cv2.imwrite(os.path.join(test_save_root_path, str(epoch) + "_0_X.png"), X_show)

                S_show = ListYS[-1][1][0].clone().detach().cpu().numpy()
                S_show = np.clip(S_show, 0, 255).astype(np.uint8)
                X_show = np.clip(ListX[-1][1][0].clone().detach().cpu().numpy(), 0, 255).astype(np.uint8)
                cv2.imwrite(os.path.join(test_save_root_path, str(epoch) + "_1_S.png"), S_show)
                cv2.imwrite(os.path.join(test_save_root_path, str(epoch) + "_1_X.png"), X_show)
        logger.info("Model testing completed!")

    writer.close()
    logger.info('Reach the maximal epochs! Finish training')


if __name__ == '__main__':
    def print_network(name, net):
        """Log *name* together with the total parameter count of *net*."""
        total = sum(param.numel() for param in net.parameters())
        logger.info('name={:s}, Total number={:d}'.format(name, total))


    # Load the YAML training configuration for this experiment.
    config_file_path = os.path.join(RootConfig.project_root_path, "Yaml", "DeeplesionInDuDoNetTrain.yaml")
    config = Config(config_file_path)

    # ODL forward-projection / back-projection operators shared by the
    # network and the data loaders.
    op_module_fp, op_module_pT = generate_deeplesion_odl_operator()
    net = InDuDoNetDeeplesion(opt, op_module_fp, op_module_pT, config.S_normalize_coefficient).cuda()
    print_network("InDuDoNet:", net)
    optimizer = optim.Adam(net.parameters(), betas=(0.5, 0.999), lr=opt.lr)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.milestone, gamma=0.5)  # learning rates

    # Fast-forward the LR schedule when resuming so the learning rate matches
    # the epoch count of the pre-trained checkpoint (continues from opt.resume).
    for _ in range(config.pre_train_epoch_num):
        scheduler.step()
    if config.pre_train_name != "":
        net.load_state_dict(torch.load(
            os.path.join(RootConfig.project_root_path, config.model_dir, config.pre_train_name, 'net_latest.pt')))
        logger.info('loaded checkpoints, epoch{:s}'.format(config.pre_train_name))

    # The official source code reads the train mask directly from disk; here
    # the project data loaders build their inputs from the config instead.
    train_dataloader = DeeplesionInDuDoNetMultiDataLoader(config, op_module_fp)
    test_dataloader = get_temp_dataloader(op_module_fp)
    train_model(net, optimizer, scheduler, train_dataloader, test_dataloader, config)
