import os
from typing import Union

import numpy
import torch
import varname
from torch.optim import lr_scheduler

from .MPN import MPN
from .base_model import BaseModel
from torch import nn
from networks.DuDoNet.UNetOfficial import UNet
from utils.CommonUtils import show_model_structure


class DuDoNet(BaseModel):
    """Dual-domain network for CT metal-artifact reduction.

    Combines a sinogram-enhancement network (``SE_Net``, an MPN) working in
    the projection domain with an image-enhancement network (``IE_Net``, a
    UNet) working in the image domain.  The two domains are linked by the
    back-projection operator ``op_module_pT`` supplied by the caller.
    All tensors handled by this model are normalized to [-1, 1].
    """

    def __init__(self, config, op_module_pT):
        """Build sub-networks, optimizers, schedulers and the loss function.

        :param config: experiment configuration; this class reads
            ``S_normalize_coefficient``, ``lr``, ``beta1`` and
            ``lr_decay_iters`` from it (plus whatever ``BaseModel`` uses).
        :param op_module_pT: operator mapping a sinogram back to image space
            (back-projection); must be differentiable, as gradients flow
            through it in :meth:`backward`.
        """
        BaseModel.__init__(self, config)

        self.S_normalize_coefficient = config.S_normalize_coefficient

        # Define the two sub-networks, one per domain.
        self.op_module_pT = op_module_pT
        self.IE_Net = UNet(config).cuda()   # image-enhancement network
        self.SE_Net = MPN(config).cuda()    # sinogram-enhancement network
        self.model_names.append("IE_Net")
        self.model_names.append("SE_Net")

        # Tensors populated by set_input() / forward().
        self.Xma = None          # metal-artifact image (network input)
        self.YLI = None          # linearly-interpolated sinogram
        self.XLI = None          # linearly-interpolated image
        self.Xgt = None          # ground-truth image
        self.Ygt = None          # ground-truth sinogram
        self.metal_trace = None  # binary metal-trace mask (sinogram domain)
        self.Y_out = None        # enhanced sinogram after trace blending
        self.Y_tmp_out = None    # raw SE_Net output before blending
        self.X_recon = None      # image back-projected from Y_out
        self.residual = None     # residual predicted by IE_Net
        self.X_out = None        # final corrected image

        # Attribute names exported by the get_current_visuals_custom* methods.
        self.variable_list = [
            "Xma",
            "XLI",
            "X_recon",
            "residual",
            "X_out",
            "Xgt"]
        self.variable_list_Y = [
            "YLI",
            "metal_trace",
            "Y_tmp_out",
            "Y_out",
            "Ygt"
        ]

        # Loss scalars (set in backward()); names registered for logging.
        self.Loss_IE_Net = None
        self.Loss_RC = None
        self.Loss_SE_Net = None
        self.total_Loss = None
        self.loss_names.append("Loss_IE_Net")
        self.loss_names.append("Loss_RC")
        self.loss_names.append("Loss_SE_Net")
        self.loss_names.append("total_Loss")

        # One Adam optimizer per sub-network, sharing the same hyper-params.
        self.optimizer_IE_Net = torch.optim.Adam(self.IE_Net.parameters(), lr=self.config.lr,
                                                 betas=(self.config.beta1, 0.999))
        self.optimizer_SE_Net = torch.optim.Adam(self.SE_Net.parameters(), lr=self.config.lr,
                                                 betas=(self.config.beta1, 0.999))

        # Every `lr_decay_iters` calls to step(), the LR is multiplied by gamma.
        self.scheduler_IE_Net = lr_scheduler.StepLR(self.optimizer_IE_Net, step_size=self.config.lr_decay_iters,
                                                    gamma=0.5)
        self.scheduler_SE_Net = lr_scheduler.StepLR(self.optimizer_SE_Net, step_size=self.config.lr_decay_iters,
                                                    gamma=0.5)

        # L1 loss shared by all three loss terms.
        self.loss_function = nn.L1Loss()

    def set_input(self, input):
        """Unpack a data-loader batch and normalize every tensor to [-1, 1].

        ``input`` is the list produced by the data loader, ordered as::

            [Xgt, Xli, Xma, Xmetal, Sgt, Sli, Sma, Smetal]

        i.e. four image-domain tensors followed by four sinogram-domain
        tensors, each already ``torch.cat``-ed along the batch dimension.
        Sinograms were scaled on the loader side as
        ``S = op_module_fp(X / 255.) / S_normalize_coefficient * 255.``,
        so both domains arrive in [0, 255] and are mapped here to [-1, 1].
        The metal trace (``input[-1]``) is a mask and is NOT re-normalized.

        :param input: list of 8 batched tensors as described above.  (The
            parameter shadows the ``input`` builtin; the name is kept for
            interface compatibility with existing callers.)
        """
        self.Xma = (input[2].cuda().float()) / 255. * 2 - 1
        self.XLI = (input[1].cuda().float()) / 255. * 2 - 1
        self.Xgt = (input[0].cuda().float()) / 255. * 2 - 1

        self.YLI = (input[5].cuda().float()) / 255. * 2 - 1
        self.metal_trace = input[-1].cuda().float()
        self.Ygt = (input[4].cuda().float()) / 255. * 2 - 1  # verified: Ygt indeed lies in [-1, 1]

    def forward(self):
        """Run the dual-domain pipeline: sinogram -> image -> refinement.

        Populates ``Y_tmp_out``, ``Y_out``, ``X_recon``, ``residual`` and
        ``X_out``; every intermediate image/sinogram is clipped to [-1, 1].
        """
        # Sinogram enhancement; keep SE_Net output only inside the metal
        # trace, and the (reliable) interpolated sinogram outside it.
        self.Y_tmp_out = self.SE_Net(self.YLI, self.metal_trace)
        self.Y_out = self.Y_tmp_out * self.metal_trace + self.YLI * (1 - self.metal_trace)
        self.Y_out = torch.clip(self.Y_out, -1, 1)
        # Undo the [-1, 1] normalization before back-projecting, since
        # op_module_pT expects sinograms in its own physical scale.
        tmp_Y_input = (self.Y_out + 1) / 2 * self.S_normalize_coefficient
        self.X_recon = self.op_module_pT(tmp_Y_input)
        self.X_recon = self.X_recon * 2 - 1  # back to the [-1, 1] range
        self.X_recon = torch.clip(self.X_recon, -1, 1)
        # Image enhancement: IE_Net predicts a residual on top of X_recon
        # from the concatenated (XLI, X_recon) channels.
        self.residual = self.IE_Net(torch.concat([self.XLI, self.X_recon], 1))
        self.X_out = self.X_recon + self.residual
        self.X_out = torch.clip(self.X_out, -1, 1)

    def backward(self):
        """Compute the three L1 loss terms and back-propagate their sum.

        Loss terms: sinogram enhancement (Y_out vs Ygt), radon-consistency
        of the back-projection (X_recon vs Xgt), and final image
        enhancement (X_out vs Xgt).
        """
        self.Loss_SE_Net = self.loss_function(self.Y_out, self.Ygt)
        self.Loss_RC = self.loss_function(self.X_recon, self.Xgt)
        self.Loss_IE_Net = self.loss_function(self.X_out, self.Xgt)
        self.total_Loss = self.Loss_SE_Net + self.Loss_RC + self.Loss_IE_Net
        self.total_Loss.backward()

    def optimize_parameters(self):
        """One training step: forward, zero grads, backward, optimizer steps."""
        self.forward()
        self.optimizer_IE_Net.zero_grad()
        self.optimizer_SE_Net.zero_grad()
        self.backward()
        self.optimizer_IE_Net.step()
        self.optimizer_SE_Net.step()

    def show_model_structure(self):
        """Log the structure of every registered sub-network.

        NOTE(review): ``self.X_out`` is passed as the example input for both
        networks, which requires :meth:`forward` to have run at least once —
        confirm this matches how ``show_model_structure`` uses its argument.
        """
        for model_name in self.model_names:
            net = getattr(self, model_name)
            show_model_structure(self.X_out, net)

    def save_networks(self, epoch, index):
        """Save all registered networks to disk.

        Each network is written twice: once under
        ``'%s_epoch%s_index%s.pth' % (name, epoch, index)`` and once as the
        rolling ``'%s_latest.pth' % name`` snapshot.

        :param epoch: current epoch, used in the checkpoint file name.
        :param index: current iteration index, used in the file name.
        """
        for name in self.model_names:
            if isinstance(name, str):
                save_filename = '%s_epoch%s_index%s.pth' % (name, epoch, index)
                latest_filename = '%s_latest.pth' % name
                save_path = os.path.join(self.model_save_dir, save_filename)
                latest_save_path = os.path.join(self.model_save_dir, latest_filename)
                net = getattr(self, name)
                torch.save(net.state_dict(), save_path)
                self.logger.info(save_filename + " has been saved!")
                torch.save(net.state_dict(), latest_save_path)
                self.logger.info(latest_filename + " has been saved!")

    def load_parameters(self, parameters_file_path, epoch: int = -1, index: int = -1):
        """Load IE_Net and SE_Net weights saved by :meth:`save_networks`.

        :param parameters_file_path: sub-directory (under
            ``config.basic_dir/config.model_save_dir``) containing the
            checkpoints.
        :param epoch: epoch of the checkpoint; -1 selects the "latest" files.
        :param index: iteration index of the checkpoint; -1 selects "latest".
        :raises Exception: if exactly one of ``epoch``/``index`` is -1.
        """
        if epoch == -1 and index == -1:
            self.IE_Net.load_state_dict(torch.load(
                os.path.join(self.config.basic_dir, self.config.model_save_dir, parameters_file_path,
                             "IE_Net_latest.pth")))
            self.SE_Net.load_state_dict(torch.load(
                os.path.join(self.config.basic_dir, self.config.model_save_dir, parameters_file_path,
                             "SE_Net_latest.pth")))
            self.logger.info("The model has load parameters from " + parameters_file_path)
        elif epoch > -1 and index > -1:
            self.IE_Net.load_state_dict(
                torch.load(os.path.join(self.config.basic_dir, self.config.model_save_dir, parameters_file_path,
                                        'IE_Net_epoch%s_index%s.pth' % (epoch, index))))
            self.SE_Net.load_state_dict(
                torch.load(os.path.join(self.config.basic_dir, self.config.model_save_dir, parameters_file_path,
                                        'SE_Net_epoch%s_index%s.pth' % (epoch, index))))
            self.logger.info("The model has load parameters from " + parameters_file_path)
        else:
            self.logger.error("Please check your epoch and index number of the saved model")
            raise Exception("Wrong epoch or index")

    def _collect_visuals(self, variable_names, index, want_key):
        """Shared implementation behind both get_current_visuals_custom*.

        Converts each named tensor attribute from [-1, 1] back to a
        [0, 255] numpy array.  ``index >= 0`` exports one sample of the
        batch, ``index == -1`` exports the full batch; any other index
        leaves the dict empty (preserved legacy behavior).

        :param variable_names: attribute names to export.
        :param index: batch index to select, or -1 for the whole batch.
        :param want_key: "" for the full dict, a list/tuple for a sub-dict,
            or a single name for just that array.
        """
        tmp_dict = dict()
        if index >= 0:
            for variable_name in variable_names:
                tmp_dict[variable_name] = (getattr(self, variable_name)[
                                               index].clone().detach().cpu().numpy() + 1) / 2 * 255
        elif index == -1:
            for variable_name in variable_names:
                tmp_dict[variable_name] = (getattr(self, variable_name).clone().detach().cpu().numpy() + 1) / 2 * 255
        if want_key == "":
            return tmp_dict
        if isinstance(want_key, (list, tuple)):
            return {k: tmp_dict[k] for k in want_key}
        return tmp_dict[want_key]

    def get_current_visuals_custom(self, index=0, want_key: Union[list, tuple, str] = "") -> Union[dict, numpy.ndarray]:
        """Export image-domain tensors (``variable_list``) as [0, 255] arrays.

        See :meth:`_collect_visuals` for the ``index``/``want_key`` contract.
        """
        return self._collect_visuals(self.variable_list, index, want_key)

    def get_current_visuals_custom_Y(self, index=0, want_key: Union[list, tuple, str] = "") -> Union[
        dict, numpy.ndarray]:
        """Export sinogram-domain tensors (``variable_list_Y``) as [0, 255] arrays.

        See :meth:`_collect_visuals` for the ``index``/``want_key`` contract.
        """
        return self._collect_visuals(self.variable_list_Y, index, want_key)

    def update_learning_rate(self):
        """Step both LR schedulers and log the IE_Net learning-rate change."""
        old_lr = self.optimizer_IE_Net.param_groups[0]['lr']
        self.scheduler_IE_Net.step()
        self.scheduler_SE_Net.step()
        new_lr = self.optimizer_IE_Net.param_groups[0]['lr']
        self.logger.info('learning rate change: from  %.7f  to  %.7f' % (old_lr, new_lr))
