#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on Tue Nov 5 11:56:06 2020
@author: hongwang (hongwang01@stu.xjtu.edu.cn)
MICCAI2021: ``InDuDoNet: An Interpretable Dual Domain Network for CT Metal Artifact Reduction''
paper link: https://arxiv.org/pdf/2109.05298.pdf
"""
from __future__ import print_function

import argparse
import os
import sys
import time
from math import ceil

import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.nn.parallel
import torch.optim as optim
from tensorboardX import SummaryWriter

from network.InDuDoNetRatFemur import InDuDoNetRatFemur
from utils.Visualizer import Visualizer
from utils.config import Config
from utils.dataloader import RatFemurInDuDoNetMultiDataLoader


# Pin training to the first visible GPU.
os.environ['CUDA_VISIBLE_DEVICES'] = '0'

parser = argparse.ArgumentParser()
parser.add_argument("--data_path", type=str, default=r"D:\gs\code\InDuDoNet\deeplesion\train",
                    help='txt path to training spa-data')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=0)
parser.add_argument('--batchSize', type=int, default=1, help='input batch size')
parser.add_argument('--patchSize', type=int, default=364, help='the height / width of the input image to network')
parser.add_argument('--niter', type=int, default=100, help='total number of training epochs')
parser.add_argument('--batchnum', type=int, default=1000,
                    help='batchsize*batchnum=1000 for randomly selecting 1000 imag pairs at every iteration')
parser.add_argument('--num_channel', type=int, default=32,
                    help='the number of dual channels')  # refer to https://github.com/hongwang01/RCDNet for the channel concatenation strategy
parser.add_argument('--T', type=int, default=4, help='the number of ResBlocks in every ProxNet')
parser.add_argument('--S', type=int, default=10, help='the number of total iterative stages')
parser.add_argument('--resume', type=int, default=0, help='continue to train')
# nargs='+' so a CLI override (e.g. --milestone 40 80) yields a list, as
# MultiStepLR requires; with the original bare `type=int` any override
# produced a single int and crashed the scheduler. Default is unchanged.
parser.add_argument("--milestone", type=int, nargs='+', default=[40, 80], help="When to decay learning rate")
parser.add_argument('--lr', type=float, default=0.0002, help='initial learning rate')
parser.add_argument('--log_dir', default='./logs/', help='tensorboard logs')
parser.add_argument('--model_dir', default='./models/', help='saving model')
parser.add_argument('--eta1', type=float, default=1, help='initialization for stepsize eta1')
parser.add_argument('--eta2', type=float, default=5, help='initialization for stepsize eta2')
parser.add_argument('--alpha', type=float, default=0.5, help='initialization for weight factor')
parser.add_argument('--gamma', type=float, default=1e-1, help='hyper-parameter for balancing different loss items')
opt = parser.parse_args()

# Create the checkpoint directory if it does not already exist.
os.makedirs(opt.model_dir, exist_ok=True)

cudnn.benchmark = True


def _display_slice(tensor, clip=False):
    """Return the first (batch, channel) slice of `tensor` as a (1, H, W)
    numpy array for the Visualizer; optionally clipped to [0, 255]."""
    arr = tensor[0][0].clone().detach().cpu().unsqueeze(0).numpy()
    return np.clip(arr, 0, 255) if clip else arr


def train_model(net, optimizer, scheduler, data_loader, config):
    """Run the full training loop for InDuDoNet on the rat-femur data.

    Args:
        net: InDuDoNetRatFemur model, already moved to GPU.
        optimizer: optimizer over net.parameters().
        scheduler: LR scheduler, stepped once per epoch.
        data_loader: iterable yielding 8-tuples
            (Xgt, Xli, Xma, mask, Sgt, Sma, Sli, Smetal) of CUDA-movable
            tensors (X* = image domain, S* = sinogram/projection domain).
        config: Config object supporting both item and attribute access;
            reads 'n_epochs', 'if_show', 'show_interval', 'model_dir',
            'save_model_interval'.

    Side effects: writes tensorboard scalars to opt.log_dir, saves
    checkpoints under config['model_dir'] / opt.model_dir, and mutates
    config.name (appends "_X" for the second visualizer).
    """
    writer = SummaryWriter(opt.log_dir)
    step = 0
    # The original code set both the item and the attribute form; Config's
    # internals are not visible here, so keep both -- TODO confirm whether
    # one assignment suffices.
    config['len_pre_epoch'] = len(data_loader)
    config.len_pre_epoch = len(data_loader)
    visualizer = Visualizer(config)   # sinogram-domain visualizer
    config.name = config.name + "_X"  # NOTE: mutates the caller's config
    visualizer2 = Visualizer(config)  # image-domain visualizer (projection figures)
    for epoch in range(opt.resume, opt.niter):
        mse_per_epoch = 0
        tic = time.time()
        lr = optimizer.param_groups[0]['lr']
        for ii, data in enumerate(data_loader):
            Xgt, Xli, Xma, mask, Sgt, Sma, Sli, Smetal = [x.cuda() for x in data]
            net.train()
            optimizer.zero_grad()
            ListX, ListS, ListYS = net(Xma, Xli, mask, Sma, Sli, Smetal)  # class InDuDoNet
            # Deep supervision: every intermediate stage contributes a
            # down-weighted (0.1) MSE, the final stage a full-weight MSE.
            # Image-domain losses are masked to exclude the metal region.
            loss_l2YSmid = 0
            loss_l2Xmid = 0
            num_mid_stages = opt.S - 1  # renamed from `iter` (shadowed the builtin)
            for j in range(num_mid_stages):
                loss_l2YSmid = loss_l2YSmid + 0.1 * F.mse_loss(ListYS[j], Sgt)
                loss_l2Xmid = loss_l2Xmid + 0.1 * F.mse_loss(ListX[j] * (1 - mask), Xgt * (1 - mask))
            loss_l2YSf = F.mse_loss(ListYS[-1], Sgt)
            loss_l2Xf = F.mse_loss(ListX[-1] * (1 - mask), Xgt * (1 - mask))
            loss_l2YS = loss_l2YSf + loss_l2YSmid
            loss_l2X = loss_l2Xf + loss_l2Xmid
            loss = opt.gamma * loss_l2YS + loss_l2X
            loss.backward()
            optimizer.step()
            mse_iter = loss.item()
            mse_per_epoch += mse_iter
            if ii % 5 == 0:
                template = '[Epoch:{:>2d}/{:<2d}] {:0>5d}/{:0>5d}, Loss={:5.2e},  Lossl2YS={:5.2e}, Lossl2X={:5.2e}, lr={:.2e}'
                print(template.format(epoch, config["n_epochs"], ii, len(data_loader),
                                      mse_iter, loss_l2YS.item(), loss_l2X.item(), lr))
            if config['if_show'] and ii % config['show_interval'] == 0:
                # Sinogram-domain panel: metal-affected, LI-corrected,
                # network output (clipped for display), ground truth.
                show_batch = dict()
                show_batch['Sma_show'] = _display_slice(Sma)
                show_batch['SLI_show'] = _display_slice(Sli)
                show_batch['S_show'] = _display_slice(ListYS[-1], clip=True)
                print(np.max(show_batch['S_show']), np.min(show_batch['S_show']))
                show_batch['Sgt'] = _display_slice(Sgt)
                visualizer.reset()
                visualizer.display_current_results(show_batch, epoch, ii, True)

                # Image-domain panel, same layout.
                show_batch_2 = dict()
                show_batch_2['Xma_show'] = _display_slice(Xma)
                show_batch_2['XLI_show'] = _display_slice(Xli)
                show_batch_2['X_show'] = _display_slice(ListX[-1], clip=True)
                print(np.max(show_batch_2['X_show']), np.min(show_batch_2['X_show']))
                show_batch_2['Xgt'] = _display_slice(Xgt)
                visualizer2.reset()
                visualizer2.display_current_results(show_batch_2, epoch, ii, True)

            # Log python floats rather than live tensors so no autograd
            # graph is retained by the writer.
            writer.add_scalar('Loss', mse_iter, step)
            writer.add_scalar('Loss_YS', loss_l2YS.item(), step)
            writer.add_scalar('Loss_X', loss_l2X.item(), step)
            step += 1
        mse_per_epoch /= (ii + 1)
        print('Loss={:+.2e}'.format(mse_per_epoch))
        print('-' * 100)
        scheduler.step()
        # Save the model every epoch (rolling "latest" checkpoint).
        torch.save(net.state_dict(), os.path.join(config['model_dir'], 'net_latest.pt'))
        if epoch % config['save_model_interval'] == 0:
            # Periodic numbered checkpoint: training metadata + weights.
            save_path_model = os.path.join(opt.model_dir, 'model_' + str(epoch))
            torch.save({
                'epoch': epoch,
                'step': step,
            }, save_path_model)
            # '%03d' zero-pads the epoch number; the original '%3d' produced
            # filenames containing spaces (e.g. 'net_epoch  5.pt').
            torch.save(net.state_dict(), os.path.join(opt.model_dir, 'net_epoch%03d.pt' % epoch))
        toc = time.time()
        print('This epoch take time {:.2f}'.format(toc - tic))
    writer.close()
    print('Reach the maximal epochs! Finish training')


if __name__ == '__main__':
    def print_network(name, net):
        """Print the total number of parameters in `net` under label `name`."""
        num_params = sum(param.numel() for param in net.parameters())
        print('name={:s}, Total number={:d}'.format(name, num_params))


    net = InDuDoNetRatFemur(opt).cuda()
    print_network("InDuDoNet:", net)
    optimizer = optim.Adam(net.parameters(), betas=(0.5, 0.999), lr=opt.lr)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.milestone, gamma=0.5)  # learning rates
    # When resuming, replay the scheduler up to epoch opt.resume so the
    # learning rate matches where training left off.
    for _ in range(opt.resume):
        scheduler.step()
    if opt.resume:
        # NOTE(review): this filename pattern ('net_%d.pt') does not match the
        # names written by train_model ('net_latest.pt' / 'net_epoch%3d.pt');
        # verify which checkpoint naming is intended before relying on resume.
        net.load_state_dict(torch.load(os.path.join(opt.model_dir, 'net_%d.pt' % (opt.resume + 1))))
        print('loaded checkpoints, epoch{:d}'.format(opt.resume))

    # Build the dataset/loader from the YAML config (the official repository
    # reads its train mask directly; this variant goes through Config).
    config_file_path = r"../Yaml/RatFemurInDuDoNetTrain.yaml"
    config = Config(config_file_path)
    train_dataloader = RatFemurInDuDoNetMultiDataLoader(config)
    train_model(net, optimizer, scheduler, train_dataloader, config)
