import os
import logging
import numpy as np
import torch
import torch.nn as nn
from models.modules import *
from models.optimizer import Optimizer


def gen_loss_with_reward(inputs, reward):
    """Policy-gradient generator loss: reward-weighted negative log-likelihood.

    Args:
        inputs: tensor of generator output probabilities (expected in (0, 1]).
        reward: tensor of per-sample rewards, broadcastable against ``inputs``.

    Returns:
        Scalar tensor: mean of ``-log(inputs) * reward``.
    """
    weighted_nll = -inputs.log() * reward
    return weighted_nll.mean()


def build_match_model(config):
    """Load pretrained word embeddings and build the configured matcher net.

    Args:
        config: dict providing 'data_dir', 'emb_file', 'matcher' (one of
            'BiLSTM', 'CNN', 'MultiChannel') and the matcher's
            hyper-parameters ('hidden_size', 'kernel_sizes', 'num_filters',
            'dropout').

    Returns:
        The instantiated matcher module.

    Raises:
        NotImplementedError: if config['matcher'] names an unknown matcher.
    """
    emb_path = os.path.join(config['data_dir'], config['emb_file'])
    logging.info("loading word embeddings from {} ...".format(emb_path))
    emb_vectors = torch.from_numpy(np.load(emb_path))
    logging.info("load word embeddings({}*{}) over!".format(emb_vectors.size(0), emb_vectors.size(1)))

    matcher_name = config['matcher']
    if matcher_name == 'BiLSTM':
        return BiLSTMMatcher(emb_vectors,
                             config['hidden_size'],
                             dropout_p=config['dropout'])
    if matcher_name == 'CNN':
        return CNNMatcher(emb_vectors,
                          kernels=config['kernel_sizes'],
                          num_filters=config['num_filters'],
                          dropout_p=config['dropout'])
    if matcher_name == 'MultiChannel':
        return MultiChannelMatcher(emb_vectors,
                                   hidden_size=config['hidden_size'],
                                   kernels=config['kernel_sizes'],
                                   num_filters=config['num_filters'],
                                   dropout_p=config['dropout'])
    raise NotImplementedError("Not Implemented Matcher: {}".format(matcher_name))


def load_or_build_pairwise_model(config, ckpt_path=None):
    """Assemble matcher, optimizer, and margin ranking loss for pairwise training.

    Args:
        config: dict with model/optimizer hyper-parameters ('optimizer', 'lr',
            'max_grad_norm', 'margin', plus whatever build_match_model needs).
        ckpt_path: optional checkpoint path holding 'model' and 'optimizer'
            state dicts; when given, both are restored.

    Returns:
        Tuple of (matcher, optimizer, loss).
    """
    matcher = build_match_model(config)

    optimizer = Optimizer(params=matcher.parameters(),
                          opt_name=config['optimizer'],
                          lr=config['lr'],
                          max_grad_norm=config['max_grad_norm'])

    if ckpt_path:
        logging.info(f"loading checkpoint from {ckpt_path} ...")
        checkpoint = torch.load(ckpt_path)
        # Partial restore: keep only parameters whose names exist in the
        # freshly built matcher, so checkpoints from a slightly different
        # architecture still load the shared weights.
        saved_states = checkpoint['model']
        current_states = matcher.state_dict()
        compatible = {name: tensor for name, tensor in saved_states.items()
                      if name in current_states}
        current_states.update(compatible)
        matcher.load_state_dict(current_states)

        optimizer.optimizer.load_state_dict(checkpoint['optimizer'])

    logging.info("build model over")
    logging.info(matcher)

    loss = nn.MarginRankingLoss(margin=config['margin'])
    logging.info("build loss over")
    logging.info(loss)

    # Reduce the LR when the monitored (maximized) validation metric plateaus.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer.optimizer, mode="max")
    optimizer.set_scheduler(scheduler)

    logging.info('...building optimizer over')
    logging.info(optimizer)
    logging.info("optimizer: {}, lr: {}".format(config['optimizer'], config['lr']))

    return matcher, optimizer, loss


def load_or_build_irgan_model(gen_config, dis_config, ckpt_dir):
    """Build the IRGAN generator/discriminator pair with their optimizers and losses.

    Args:
        gen_config: generator config dict ('optimizer', 'lr', 'max_grad_norm',
            optional 'ckpt_file', plus matcher settings for build_match_model).
        dis_config: discriminator config dict with the same keys plus 'margin'.
        ckpt_dir: directory containing the optional 'ckpt_file' checkpoints.

    Returns:
        ((generator, gen_loss, gen_optimizer),
         (discriminator, dis_loss, dis_optimizer))
    """
    # build matcher in generator and discriminator
    generator = build_match_model(gen_config)
    discriminator = build_match_model(dis_config)

    # build optimizer
    gen_optimizer = Optimizer(params=generator.parameters(),
                              opt_name=gen_config['optimizer'],
                              lr=gen_config['lr'],
                              max_grad_norm=gen_config['max_grad_norm'])

    # BUG FIX: this optimizer previously read gen_config, so the discriminator
    # silently trained with the generator's optimizer/lr/grad-norm settings.
    dis_optimizer = Optimizer(params=discriminator.parameters(),
                              opt_name=dis_config['optimizer'],
                              lr=dis_config['lr'],
                              max_grad_norm=dis_config['max_grad_norm'])

    if ckpt_dir and gen_config["ckpt_file"]:
        ckpt_path = os.path.join(ckpt_dir, gen_config['ckpt_file'])
        logging.info("loading generator checkpoint from {} ...".format(ckpt_path))
        checkpoint = torch.load(ckpt_path)
        # Full (strict) restore of the generator weights; the optimizer state
        # is deliberately NOT restored so adversarial training starts fresh.
        generator.load_state_dict(checkpoint['model'])

    if ckpt_dir and dis_config["ckpt_file"]:
        ckpt_path = os.path.join(ckpt_dir, dis_config['ckpt_file'])
        logging.info("loading discriminator checkpoint from {} ...".format(ckpt_path))
        checkpoint = torch.load(ckpt_path)
        # Same as the generator: weights only, optimizer state left fresh.
        discriminator.load_state_dict(checkpoint['model'])

    logging.info("build generator model over")
    logging.info(generator)

    logging.info("build discriminator model over")
    logging.info(discriminator)

    # generator loss by gradient policy (policy gradient / REINFORCE)
    gen_loss = gen_loss_with_reward
    # discriminator loss: hinge margin loss, same with matcher
    dis_loss = nn.MarginRankingLoss(margin=dis_config['margin'])

    return (generator, gen_loss, gen_optimizer), (discriminator, dis_loss, dis_optimizer)
