import time
import datetime
from os.path import join as ospj
import os

from paddle import fluid
import numpy as np
from munch import Munch
import paddle

from utils.compare_params import load_params_from_pytorch
from core.model import build_model
from core.checkpoint import CheckpointIO
from core.data_loader import InputFetcher
from utils.logger import get_logger
from core import utils
logger = get_logger(__name__)


def count_params(shape):
    """Return the number of elements of a tensor with the given shape.

    An empty shape yields 1 (a scalar parameter).
    """
    total = 1
    for dim in shape:
        total *= dim
    return total


def print_network(network, name):
    """Log the total trainable-parameter count of *network* under *name*.

    Args:
        network: any object exposing ``parameters()`` (a Paddle Layer here).
        name: label used in the log line.
    """
    # sum over a generator instead of a manual accumulation loop
    num_params = sum(count_params(p.shape) for p in network.parameters())
    logger.info("Number of parameters of %s: %i" % (name, num_params))


class Solver(fluid.dygraph.Layer):
    """StarGAN v2 training / inference driver (Paddle dygraph port).

    Builds the networks and their EMA (exponential-moving-average) copies,
    one Adam optimizer per trainable network (train mode only), and the
    checkpoint handlers used to save/restore them.
    """

    def __init__(self, args, place):
        super(Solver, self).__init__()
        self.args = args
        self.place = place
        with fluid.dygraph.guard(self.place):
            self.nets, self.nets_ema = build_model(args)
            for name, module in self.nets.items():
                print_network(module, name)

            if args.mode == 'train':
                self.optims = Munch()
                for net in self.nets.keys():
                    # FAN is a fixed pretrained network; it is never optimized.
                    if net == 'fan':
                        continue
                    self.optims[net] = fluid.optimizer.Adam(
                        parameter_list=self.nets[net].parameters(),
                        # the mapping network trains with its own learning rate
                        learning_rate=args.f_lr if net == 'mapping_network' else args.lr,
                        beta1=args.beta1,
                        beta2=args.beta2,
                        # NOTE(review): L2Decay is the closest Paddle analogue of the
                        # reference implementation's weight_decay, not bit-identical
                        # to torch Adam's coupled decay — confirm acceptable.
                        regularization=fluid.regularizer.L2Decay(args.weight_decay)
                    )

                self.ckptios = [
                    CheckpointIO(ospj(args.checkpoint_dir, '{:06d}_nets.ckpt'), **self.nets),
                    CheckpointIO(ospj(args.checkpoint_dir, '{:06d}_nets_ema.ckpt'), **self.nets_ema),
                ]
            else:
                # inference only needs the EMA networks
                self.ckptios = [CheckpointIO(ospj(args.checkpoint_dir, '{:06d}_nets_ema.ckpt'), **self.nets_ema)]

    def _save_checkpoint(self, step):
        """Save every registered checkpoint group at the given step."""
        for ckptio in self.ckptios:
            ckptio.save(step)

    def _load_checkpoint(self, step):
        """Restore every registered checkpoint group from the given step."""
        for ckptio in self.ckptios:
            ckptio.load(step)

    def _reset_grad(self):
        """Clear accumulated gradients on all networks."""
        for model in self.nets.values():
            model.clear_gradients()

    def set_eval(self, models: list):
        """Switch the given sub-models to evaluation mode."""
        for model in models:
            model.eval()

    def set_train(self, models: list):
        """Switch the given sub-models to training mode."""
        for model in models:
            model.train()

    def debug_generator_model(self, model, input_shapes):
        """Run *model* once on all-ones dummy inputs and log the output shape.

        Args:
            model: a two-input network (image/latent + style/label).
            input_shapes: pair of shapes for the two dummy inputs.
        """
        logger.debug(f'input_shapes: {input_shapes}')
        x = fluid.dygraph.to_variable(np.ones(input_shapes[0])).astype('float32')
        s = fluid.dygraph.to_variable(np.ones(input_shapes[1])).astype('float32')
        model.eval()
        out = model(x, s)
        logger.debug(f'out: {out.shape}')

    def train(self, loaders):
        """Full adversarial training loop.

        Each iteration alternates two discriminator updates (latent-guided,
        then reference-guided) with two generator updates (same split),
        maintains EMA copies of the generator-side networks, linearly decays
        the diversity-sensitive loss weight, and periodically logs, samples
        and checkpoints.
        """
        args = self.args
        nets = self.nets
        nets_ema = self.nets_ema
        optims = self.optims

        # remember the initial value of the ds weight; it decays linearly to 0
        initial_lambda_ds = args.lambda_ds

        logger.info('Start training...')
        start_time = time.time()

        # Debug replay: when debug_step != -1, batches are replayed from .npy
        # dumps (recorded beforehand) instead of drawn from the data loaders.
        debug_step = -1
        debug_index = 0
        if debug_step != -1:
            # Bug fix: these dumps were previously loaded unconditionally,
            # crashing normal training runs when the files are absent.
            x_reals = np.load('x_reals.npy')
            y_orgs = np.load('y_orgs.npy')
            x_refs = np.load('x_refs.npy')
            x_ref2s = np.load('x_ref2s.npy')
            y_trgs = np.load('y_trgs.npy')
            z_trgs = np.load('z_trgs.npy')
            z_trg2s = np.load('z_trg2s.npy')

        with fluid.dygraph.guard(self.place):
            fetcher = InputFetcher(loaders.src, loaders.ref, args.latent_dim, 'train')
            # fixed validation batch reused for every periodic sample image
            fetcher_val = InputFetcher(loaders.val, None, args.latent_dim, 'val')
            inputs_val = next(fetcher_val)

            # resume training if necessary (was disabled during debugging)
            if args.resume_iter > 0:
                self._load_checkpoint(args.resume_iter)

            self.set_train([nets.discriminator, nets.generator, nets.mapping_network, nets.style_encoder])
            for i in range(args.resume_iter, args.total_iters):
                if debug_step != -1:
                    # replay a recorded batch
                    index = debug_index % debug_step
                    x_real = fluid.dygraph.to_variable(x_reals[index]).astype('float32')
                    y_org = fluid.dygraph.to_variable(y_orgs[index]).astype('int32')
                    x_ref = fluid.dygraph.to_variable(x_refs[index]).astype('float32')
                    x_ref2 = fluid.dygraph.to_variable(x_ref2s[index]).astype('float32')
                    y_trg = fluid.dygraph.to_variable(y_trgs[index]).astype('int32')
                    z_trg = fluid.dygraph.to_variable(z_trgs[index]).astype('float32')
                    z_trg2 = fluid.dygraph.to_variable(z_trg2s[index]).astype('float32')
                else:
                    # draw a fresh batch from the loaders
                    inputs = next(fetcher)
                    x_real, y_org = inputs.x_src, inputs.y_src
                    x_ref, x_ref2, y_trg = inputs.x_ref, inputs.x_ref2, inputs.y_ref
                    z_trg, z_trg2 = inputs.z_trg, inputs.z_trg2
                    x_real = fluid.dygraph.to_variable(x_real).astype('float32')
                    y_org = fluid.dygraph.to_variable(y_org).astype('int32')
                    x_ref = fluid.dygraph.to_variable(x_ref).astype('float32')
                    x_ref2 = fluid.dygraph.to_variable(x_ref2).astype('float32')
                    y_trg = fluid.dygraph.to_variable(y_trg).astype('int32')
                    z_trg = fluid.dygraph.to_variable(z_trg).astype('float32')
                    z_trg2 = fluid.dygraph.to_variable(z_trg2).astype('float32')

                # face-alignment heatmaps are only needed when w_hpf is active
                masks = nets.fan.get_heatmap(x_real) if args.w_hpf > 0 else None

                # --- discriminator updates: latent-guided, then reference-guided
                d_loss, d_losses_latent = compute_d_loss(
                    nets, args, x_real, y_org, y_trg, z_trg=z_trg, masks=masks)
                self._reset_grad()
                d_loss.backward()
                optims.discriminator.minimize(d_loss)

                d_loss, d_losses_ref = compute_d_loss(
                    nets, args, x_real, y_org, y_trg, x_ref=x_ref, masks=masks)
                self._reset_grad()
                d_loss.backward()
                optims.discriminator.minimize(d_loss)

                # --- generator update (latent-guided): G, F and E all step
                g_loss, g_losses_latent = compute_g_loss(
                    nets, args, x_real, y_org, y_trg, z_trgs=[z_trg, z_trg2], masks=masks)
                self._reset_grad()
                g_loss.backward()
                # bug fix: these optimizers previously minimized d_loss
                optims.generator.minimize(g_loss)
                optims.mapping_network.minimize(g_loss)
                optims.style_encoder.minimize(g_loss)

                # --- generator update (reference-guided): only G steps
                g_loss, g_losses_ref = compute_g_loss(
                    nets, args, x_real, y_org, y_trg, x_refs=[x_ref, x_ref2], masks=masks)
                self._reset_grad()
                g_loss.backward()
                optims.generator.minimize(g_loss)

                # compute moving average of network parameters
                moving_average(nets.generator, nets_ema.generator, beta=0.999)
                moving_average(nets.mapping_network, nets_ema.mapping_network, beta=0.999)
                moving_average(nets.style_encoder, nets_ema.style_encoder, beta=0.999)

                # decay weight for diversity sensitive loss
                if args.lambda_ds > 0:
                    args.lambda_ds -= (initial_lambda_ds / args.ds_iter)

                # print out log info
                if (i + 1) % args.print_every == 0:
                    elapsed = time.time() - start_time
                    elapsed = str(datetime.timedelta(seconds=elapsed))[:-7]
                    log = "Elapsed time [%s], Iteration [%i/%i], " % (elapsed, i + 1, args.total_iters)
                    all_losses = dict()
                    for loss, prefix in zip([d_losses_latent, d_losses_ref, g_losses_latent, g_losses_ref],
                                            ['D/latent_', 'D/ref_', 'G/latent_', 'G/ref_']):
                        for key, value in loss.items():
                            all_losses[prefix + key] = value
                    all_losses['G/lambda_ds'] = args.lambda_ds
                    log += ' '.join(['%s: [%.7f]' % (key, value) for key, value in all_losses.items()])
                    # use the module logger instead of bare print for consistency
                    logger.info(log)

                # generate images for debugging
                if (i + 1) % args.sample_every == 0:
                    os.makedirs(args.sample_dir, exist_ok=True)
                    utils.debug_image(nets_ema, args, inputs=inputs_val, step=i + 1)

                # save model checkpoints
                if (i + 1) % args.save_every == 0:
                    self._save_checkpoint(step=i + 1)

                # stop early after one replay cycle in debug mode
                if debug_step != -1 and (debug_index + 1) % debug_step == 0:
                    break
                debug_index += 1

    def sample(self, loaders):
        """Translate the source batch with styles from the reference batch.

        Uses the EMA networks restored from ``args.resume_iter`` and writes
        the result grid to ``<result_dir>/reference.jpg``.
        """
        args = self.args
        nets_ema = self.nets_ema
        os.makedirs(args.result_dir, exist_ok=True)

        with fluid.dygraph.guard(self.place):
            fetcher = InputFetcher(loaders.src, None, args.latent_dim, 'test')
            fetcher_ref = InputFetcher(loaders.ref, None, args.latent_dim, 'test')
            self._load_checkpoint(args.resume_iter)

            for name, model in nets_ema.items():
                model.eval()
            src = next(fetcher)
            ref = next(fetcher_ref)

            fname = ospj(args.result_dir, 'reference.jpg')
            logger.info('Working on {}...'.format(fname))
            # NOTE(review): ref.y (domain labels) is cast to float32 here while
            # the training loop casts labels to int32 — confirm that
            # translate_using_reference really expects float labels.
            utils.translate_using_reference(nets_ema, args,
                                            fluid.dygraph.to_variable(src.x).astype('float32'),
                                            fluid.dygraph.to_variable(ref.x).astype('float32'),
                                            fluid.dygraph.to_variable(ref.y).astype('float32'),
                                            fname)

    def evaluate(self):
        """Placeholder for quantitative evaluation (not implemented yet)."""
        args = self.args


def moving_average(model, model_test, beta=0.999):
    """Update model_test's parameters as an EMA of model's parameters.

    Each target parameter becomes ``(1 - beta) * src + beta * target``,
    written via ``set_value`` so the dygraph variables are updated in place.
    """
    param_pairs = zip(model.parameters(), model_test.parameters())
    for src, ema in param_pairs:
        src_np = src.numpy()
        # same arithmetic form as the reference: src + (ema - src) * beta
        ema.set_value(src_np + (ema.numpy() - src_np) * beta)

def compute_g_loss(nets, args, x_real, y_org, y_trg, z_trgs=None, x_refs=None, masks=None):
    """Generator-side loss: adversarial + style-reconstruction
    - diversity-sensitive + cycle-consistency.

    Exactly one of ``z_trgs`` (pair of latent codes) or ``x_refs`` (pair of
    reference images) must be given; it selects how target styles are built.
    Returns ``(total_loss, Munch_of_per_term_numpy_scalars)``.
    """
    assert (z_trgs is None) != (x_refs is None)
    latent_mode = z_trgs is not None
    if latent_mode:
        z_trg, z_trg2 = z_trgs
    else:
        x_ref, x_ref2 = x_refs

    # adversarial loss: the translated image must fool the discriminator
    if latent_mode:
        s_trg = nets.mapping_network(z_trg, y_trg)
    else:
        s_trg = nets.style_encoder(x_ref, y_trg)
    x_fake = nets.generator(x_real, s_trg, masks=masks)
    loss_adv = adv_loss(nets.discriminator(x_fake, y_trg), 1)

    # style reconstruction: the encoder must recover s_trg from x_fake
    loss_sty = paddle.mean(paddle.abs(nets.style_encoder(x_fake, y_trg) - s_trg))

    # diversity-sensitive: two styles should yield visibly different outputs
    # (the second fake is detached so only x_fake receives this gradient)
    if latent_mode:
        s_trg2 = nets.mapping_network(z_trg2, y_trg)
    else:
        s_trg2 = nets.style_encoder(x_ref2, y_trg)
    x_fake2 = nets.generator(x_real, s_trg2, masks=masks).detach()
    loss_ds = paddle.mean(paddle.abs(x_fake - x_fake2))

    # cycle consistency: translating back with the source style recovers x_real
    masks = nets.fan.get_heatmap(x_fake) if args.w_hpf > 0 else None
    s_org = nets.style_encoder(x_real, y_org)
    x_rec = nets.generator(x_fake, s_org, masks=masks)
    loss_cyc = paddle.mean(paddle.abs(x_rec - x_real))

    # the diversity term is maximized, hence the minus sign
    loss = loss_adv + args.lambda_sty * loss_sty \
        - args.lambda_ds * loss_ds + args.lambda_cyc * loss_cyc
    return loss, Munch(adv=loss_adv.numpy(),
                       sty=loss_sty.numpy(),
                       ds=loss_ds.numpy(),
                       cyc=loss_cyc.numpy())


def compute_d_loss(nets, args, x_real, y_org, y_trg, z_trg=None, x_ref=None, masks=None):
    """Discriminator loss: real/fake adversarial terms plus R1 penalty.

    Exactly one of ``z_trg`` (latent code) or ``x_ref`` (reference image) must
    be given; it selects how the target style for the fake image is produced.

    Shapes as passed by the training loop (per the example comments below):
        x_real: (N, 3, 256, 256) float32, y_org/y_trg: (N,) int32,
        z_trg: (N, latent_dim) float32, x_ref: (N, 3, 256, 256) float32.

    Returns ``(total_loss, Munch_of_per_term_numpy_scalars)``.
    """
    assert (z_trg is None) != (x_ref is None)
    # x_real: [4, 3, 256, 256]
    # y_org: [4] [2 1 1 0]
    # y_trg: [4] [2 2 1 2]
    # z_trg: [4, 16]
    # x_ref: [4, 3, 256, 256]

    # Real branch: gradients w.r.t. x_real are needed for the R1 penalty,
    # so stop_gradient is temporarily disabled on the input and restored after.
    x_real.stop_gradient = False
    out = nets.discriminator(x_real, y_org)
    loss_real = adv_loss(out, 1)
    loss_reg = r1_reg(out, x_real)
    x_real.stop_gradient = True

    # Fake branch: style mapping and generation run without gradient
    # (equivalent to torch.no_grad() in the reference implementation);
    # only the discriminator's scoring of x_fake contributes gradients.
    with fluid.dygraph.no_grad():
        if z_trg is not None:
            s_trg = nets.mapping_network(z_trg, y_trg)
        else:  # x_ref is not None
            s_trg = nets.style_encoder(x_ref, y_trg)

        # x_fake: (N, 3, 256, 256); s_trg: (N, style_dim)
        x_fake = nets.generator(x_real, s_trg, masks=masks)

    out = nets.discriminator(x_fake, y_trg)
    loss_fake = adv_loss(out, 0)

    loss = loss_real + loss_fake + args.lambda_reg * loss_reg
    return loss, Munch(real=loss_real.numpy(),
                       fake=loss_fake.numpy(),
                       reg=loss_reg.numpy())


def adv_loss(logits, target):
    """Binary adversarial loss: sigmoid cross-entropy against a constant label.

    Args:
        logits: raw discriminator outputs (any shape).
        target: 1 for "real", 0 for "fake".

    Returns the mean loss as a scalar variable.

    Raises:
        ValueError: if target is not 0 or 1.
    """
    # validate explicitly: an assert would be stripped under `python -O`
    if target not in (0, 1):
        raise ValueError("target must be 0 or 1, got %r" % (target,))
    targets = fluid.layers.ones_like(logits) if target == 1 else fluid.layers.zeros_like(logits)
    loss = fluid.layers.sigmoid_cross_entropy_with_logits(logits, targets)
    return paddle.mean(loss)


def r1_reg(d_out, x_in):
    """Zero-centered R1 gradient penalty for real images.

    Implements ``0.5 * E[ ||d D(x)/d x||^2 ]``: the squared gradient of the
    discriminator output w.r.t. its (real) input, summed per sample and
    averaged over the batch — i.e. the reference formula
    ``0.5 * grad2.view(B, -1).sum(1).mean(0)``.

    Args:
        d_out: discriminator output for x_in (leading dim = batch).
        x_in: input images with stop_gradient disabled by the caller.
    """
    batch_size = x_in.shape[0]
    d_out_sum = fluid.layers.reduce_sum(d_out)
    # NOTE(review): the reference implementation uses create_graph=True so the
    # penalty itself is differentiable; False here may be a Paddle double-grad
    # limitation of this version — confirm before enabling.
    grad_dout = fluid.dygraph.grad(
        outputs=d_out_sum, inputs=x_in,
        create_graph=False, retain_graph=True, only_inputs=True
    )[0]
    grad_dout2 = fluid.layers.pow(grad_dout, factor=2.0)
    assert grad_dout2.shape == x_in.shape
    # Bug fix: the previous code summed over ALL elements and then took the
    # mean of that scalar, making the penalty batch_size times too large.
    # Sum per sample (dim 1 after flattening), then average over the batch.
    per_sample = fluid.layers.reduce_sum(
        fluid.layers.reshape(grad_dout2, (batch_size, -1)), dim=1)
    reg = 0.5 * paddle.mean(per_sample)
    return reg