# -*- coding:utf-8 -*-
import os
import time
import logging
import torch
import torch.nn.functional as F
import numpy as np
import json
import models.model_helper as model_helper
from utils.metrics import run_rank_evaluate

LOG_FORMAT = '%(asctime)s %(levelname)s %(message)s'
USE_CUDA = torch.cuda.is_available()


class IRGANTrainer(object):
    """IRGAN-style adversarial trainer alternating generator / discriminator updates.

    The generator learns to pick hard negative candidates for each query; the
    discriminator is trained to separate true positives from those sampled
    negatives and, in turn, provides the reward signal that drives the
    generator's policy-gradient-style update.
    """

    def __init__(self, config, expt_dir, ckpt_dir):
        """Prepare directories, logging, persisted config, and both models.

        Args:
            config: hyper-parameter dict; must provide 'batch_size',
                'display_frequency', 'checkpoint_frequency', 'generator',
                'discriminator', 'matcher', 'sampled_temperature', 'dis_k',
                'g_steps', 'd_steps', 'max_epoch'.
            expt_dir: experiment output directory (created if missing).
            ckpt_dir: location handed to the model loader for resuming.
        """
        self.config = config

        self._batch_size = config['batch_size']
        self._display_frequency = config['display_frequency']
        self._checkpoint_frequency = config['checkpoint_frequency']
        self.expt_dir = expt_dir
        self.ckpt_path = os.path.join(self.expt_dir, "checkpoints")

        # makedirs(exist_ok=True) is race-free and also creates missing parent
        # directories (the original os.mkdir raised if expt_dir's parent was absent).
        os.makedirs(self.expt_dir, exist_ok=True)
        os.makedirs(self.ckpt_path, exist_ok=True)

        # Logging goes to a per-experiment file plus the console, same format.
        log_file = os.path.join(self.expt_dir, 'log.txt')
        logging.basicConfig(format=LOG_FORMAT, level=logging.INFO,
                            filename=log_file, filemode='a')
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(logging.Formatter(LOG_FORMAT))
        logging.getLogger('').addHandler(console)

        # Persist the config next to the logs. A context manager closes the
        # file handle (the original open() leaked it).
        with open(os.path.join(expt_dir, "config.json"), 'w', encoding="utf-8") as fp:
            json.dump(config, fp, ensure_ascii=False, indent=2)
        for key in config:
            logging.info("---%s: %s", key, config[key])

        # Build or resume the generator / discriminator pair.
        (generator, gen_loss, gen_optimizer), (discriminator, dis_loss, dis_optimizer) = \
            model_helper.load_or_build_irgan_model(config['generator'],
                                                   config['discriminator'],
                                                   ckpt_dir)
        logging.info("USE CUDA: %s", USE_CUDA)
        self.device = torch.device("cuda" if USE_CUDA else "cpu")

        # Generator side.
        self.generator = generator.to(self.device)
        self.gen_criterion = gen_loss  # applied to tensors already on self.device
        self.gen_optimizer = gen_optimizer

        # Discriminator side.
        self.discriminator = discriminator.to(self.device)
        self.dis_criterion = dis_loss.to(self.device)
        self.dis_optimizer = dis_optimizer

    def save(self, prefix, step, valid_score, test_score):
        """Checkpoint both models (and the generator optimizer state) to disk.

        The filename encodes matcher name, timestamp, step and scores, so
        successive checkpoints never overwrite each other.
        """
        date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())
        filename = (self.config['matcher'] + "_" + date_time
                    + "_{}_step_{}_valid_{:.4f}_test_{:.4f}.tar"
                    .format(prefix, step, valid_score, test_score))
        # NOTE(review): only the generator optimizer state is saved — the
        # discriminator optimizer is lost across restarts; confirm intended.
        torch.save({"generator": self.generator.state_dict(),
                    "discriminator": self.discriminator.state_dict(),
                    "optimizer": self.gen_optimizer.optimizer.state_dict()},
                   os.path.join(self.ckpt_path, filename))

    def evaluate(self, matcher, valid_iter):
        """Score `matcher` over `valid_iter` and return the mean of all
        ranking metrics reported by run_rank_evaluate.

        Each batch is a (query_inputs, candidate_inputs, labels) triple;
        `valid_iter.groups()` supplies the per-query grouping for ranking.
        """
        matcher.eval()

        predictions, labels = [], []
        with torch.no_grad():
            for batch_data in valid_iter:
                batch_query_inputs = batch_data[0].to(self.device)
                batch_candidate_inputs = batch_data[1].to(self.device)

                score = matcher.forward(batch_query_inputs, batch_candidate_inputs)
                predictions += score.tolist()
                labels += batch_data[2].tolist()

        # Restore training mode before handing the model back.
        matcher.train()

        groups = valid_iter.groups()
        res_dict = run_rank_evaluate(labels, predictions, groups)
        scores = []
        for key in res_dict:
            logging.info("... %s:%.4f", key, res_dict[key])
            scores.append(res_dict[key])
        # Guard: an empty metric dict made the original raise ZeroDivisionError.
        return sum(scores) / len(scores) if scores else 0.0

    def train_dis_step(self, query_inputs: torch.Tensor, pos_inputs: torch.Tensor, neg_inputs: torch.Tensor):
        """One discriminator update; returns the scalar loss value.

        Inputs are rank-3 tensors (n_samples, pools_size, feat): each query
        carries a pool of (pos, neg) candidate pairs. The frozen generator
        scores every pair, and `dis_k` pairs per query are sampled without
        replacement proportionally to a temperature-scaled softmax of the
        generator's advantage, so the discriminator trains on the generator's
        hardest negatives.
        """
        n_samples = query_inputs.size(0)
        pools_size = query_inputs.size(1)

        # Flatten (n_samples, pools_size, feat) -> (n_samples * pools_size, feat).
        batch_query_inputs = query_inputs.view(-1, query_inputs.size(2)).to(self.device)
        batch_pos_inputs = pos_inputs.view(-1, pos_inputs.size(2)).to(self.device)
        batch_neg_inputs = neg_inputs.view(-1, neg_inputs.size(2)).to(self.device)

        # --- sample negatives from the (frozen) generator -------------------
        self.generator.eval()
        gen_idx = []
        with torch.no_grad():
            pos_scores = self.generator.forward(batch_query_inputs, batch_pos_inputs)
            neg_scores = self.generator.forward(batch_query_inputs, batch_neg_inputs)
            # Generator "advantage" of each negative over its positive.
            gen_scores = (neg_scores - pos_scores).view(n_samples, pools_size)

            for i in range(n_samples):
                # Temperature-scaled softmax over this query's pool.
                exp_rating = np.exp(gen_scores[i].cpu().numpy() * self.config['sampled_temperature'])
                gen_probs = exp_rating / np.sum(exp_rating)
                neg_index = np.random.choice(np.arange(pools_size),
                                             size=self.config['dis_k'],
                                             p=gen_probs, replace=False).tolist()
                # Map pool-local indices back to flattened batch positions.
                gen_idx += [ii + i * pools_size for ii in neg_index]

        self.generator.train()

        # --- discriminator update on the sampled pairs ----------------------
        np.random.shuffle(gen_idx)
        gen_idx = torch.LongTensor(gen_idx)
        sampled_query_inputs = batch_query_inputs[gen_idx]
        sampled_pos_inputs = batch_pos_inputs[gen_idx]
        sampled_neg_inputs = batch_neg_inputs[gen_idx]

        self.dis_optimizer.optimizer.zero_grad()
        # targets=1 meaning "pos should outrank neg" — assumes a
        # margin-ranking-style criterion; TODO confirm against dis_loss.
        targets = torch.ones(sampled_query_inputs.size(0)).to(self.device)

        pos_logits = self.discriminator.forward(query_inputs=sampled_query_inputs,
                                                candidate_inputs=sampled_pos_inputs)
        neg_logits = self.discriminator.forward(query_inputs=sampled_query_inputs,
                                                candidate_inputs=sampled_neg_inputs)
        loss = self.dis_criterion(pos_logits, neg_logits, targets)

        loss.backward()
        self.dis_optimizer.step()
        return loss.item()

    def train_gen_step(self, query_inputs: torch.Tensor, pos_inputs: torch.Tensor, neg_inputs: torch.Tensor):
        """One generator update; returns the scalar loss value.

        The frozen discriminator provides a reward in (-0.5, 0.5) for each
        (pos, neg) pair; the generator's per-query softmax over its candidate
        pool is then pushed toward high-reward pairs via gen_criterion.
        """
        n_samples = query_inputs.size(0)
        pools_size = query_inputs.size(1)

        # Flatten (n_samples, pools_size, feat) -> (n_samples * pools_size, feat).
        batch_query_inputs = query_inputs.view(-1, query_inputs.size(2)).to(self.device)
        batch_pos_inputs = pos_inputs.view(-1, pos_inputs.size(2)).to(self.device)
        batch_neg_inputs = neg_inputs.view(-1, neg_inputs.size(2)).to(self.device)

        # --- reward from the (frozen) discriminator -------------------------
        self.discriminator.eval()
        with torch.no_grad():
            pos_scores = self.discriminator.forward(batch_query_inputs, batch_pos_inputs)
            neg_scores = self.discriminator.forward(batch_query_inputs, batch_neg_inputs)

            # sigmoid(.) - 0.5 centres the reward in (-0.5, 0.5).
            reward = torch.sigmoid(neg_scores - pos_scores
                                   + self.config['discriminator']['margin']) - 0.5

        self.discriminator.train()

        # --- generator update ------------------------------------------------
        self.gen_optimizer.optimizer.zero_grad()

        pos_logits = self.generator.forward(batch_query_inputs, batch_pos_inputs)
        neg_logits = self.generator.forward(batch_query_inputs, batch_neg_inputs)

        # Per-query softmax over the candidate pool.
        batch_logits = F.softmax((neg_logits - pos_logits).view(n_samples, pools_size), dim=1)

        loss = self.gen_criterion(batch_logits.view(-1), reward)
        loss.backward()
        self.gen_optimizer.step()
        return loss.item()

    def _run_phase(self, tag, n_passes, step, epoch, train_step_fn, matcher,
                   train_iter, valid_iter, test_iter):
        """Run one adversarial phase (`n_passes` sweeps of train_iter) for one player.

        tag: 'GEN' or 'DIS' (lower-cased as the checkpoint prefix);
        step: this player's global step counter — the updated value is returned;
        train_step_fn: per-batch update (train_gen_step / train_dis_step);
        matcher: the model evaluated and checkpointed at checkpoint frequency.
        """
        for _ in range(n_passes):
            print_loss = []
            for query_inputs, pos_inputs, neg_inputs in train_iter:
                step += 1
                print_loss.append(train_step_fn(query_inputs, pos_inputs, neg_inputs))

                if step % self._display_frequency == 0:
                    logging.info("Epoch: %d; %s Step: %d; Loss: %.4f",
                                 epoch + 1, tag, step,
                                 sum(print_loss) / len(print_loss))
                    print_loss = []

                if step % self._checkpoint_frequency == 0:
                    logging.info("Epoch: %d; %s Step: %d; evaluate valid ...",
                                 epoch + 1, tag, step)
                    valid_score = self.evaluate(matcher, valid_iter)

                    test_score = 0.0
                    if test_iter is not None:
                        logging.info("Epoch: %d; %s Step: %d; evaluate test ...",
                                     epoch + 1, tag, step)
                        test_score = self.evaluate(matcher, test_iter)

                    self.save(tag.lower(), step, valid_score, test_score)
        return step

    def train(self, train_iter, valid_iter, test_iter=None):
        """Alternate generator and discriminator phases for max_epoch epochs.

        Each epoch runs config['g_steps'] generator sweeps followed by
        config['d_steps'] discriminator sweeps over train_iter, with periodic
        logging, evaluation, and checkpointing handled by _run_phase.
        """
        logging.info("train number of samples: %d", len(train_iter))
        logging.info("valid number of samples: %d", len(valid_iter))

        config = self.config
        g_step, d_step = 0, 0

        # Baseline metrics before any adversarial updates.
        for tag, matcher in (("DIS", self.discriminator), ("GEN", self.generator)):
            logging.info("Init, %s evaluate valid ...", tag)
            self.evaluate(matcher, valid_iter)
            if test_iter:
                logging.info("Init, %s evaluate test ...", tag)
                self.evaluate(matcher, test_iter)

        for epoch in range(config['max_epoch']):
            g_step = self._run_phase("GEN", config['g_steps'], g_step, epoch,
                                     self.train_gen_step, self.generator,
                                     train_iter, valid_iter, test_iter)
            d_step = self._run_phase("DIS", config['d_steps'], d_step, epoch,
                                     self.train_dis_step, self.discriminator,
                                     train_iter, valid_iter, test_iter)
