import os
import time
import logging
import torch
import json
import models.model_helper as model_helper
from utils.metrics import run_rank_evaluate


# Shared logging format: timestamp, level, message.
LOG_FORMAT = '%(asctime)s %(levelname)s %(message)s'
# Probe CUDA availability once at import time; trainer picks device from this.
USE_CUDA = torch.cuda.is_available()


class SupervisedTrainer(object):
    """Pairwise-ranking trainer.

    Optimizes a matcher so that positive candidates score above negative
    candidates for the same query, with periodic loss display, evaluation
    on a validation (and optional test) iterator, and checkpointing.
    """

    def __init__(self, config, expt_dir, ckpt_path):
        """Set up directories, logging, and the model/optimizer/loss triple.

        Args:
            config: dict holding at least 'batch_size', 'display_frequency',
                'checkpoint_frequency', 'matcher' and 'max_epoch'.
            expt_dir: experiment directory; 'log.txt', 'config.json' and a
                'checkpoints' subdirectory are created here.
            ckpt_path: checkpoint location forwarded to
                model_helper.load_or_build_pairwise_model (presumably a path
                to resume from; may be empty for a fresh model — confirm).
        """
        self.config = config

        self._batch_size = config['batch_size']
        self._display_frequency = config['display_frequency']
        self._checkpoint_frequency = config['checkpoint_frequency']
        self.expt_dir = expt_dir
        self.ckpt_path = os.path.join(self.expt_dir, "checkpoints")

        # makedirs(exist_ok=True) creates expt_dir and the checkpoints
        # subdirectory in one race-free call (the exists()+mkdir pattern
        # could fail if another process created the dir in between).
        os.makedirs(self.ckpt_path, exist_ok=True)

        # Logging: append to a per-experiment file and echo to the console.
        log_file = os.path.join(self.expt_dir, 'log.txt')
        logging.basicConfig(format=LOG_FORMAT, level=logging.INFO, filename=log_file, filemode='a')
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        console.setFormatter(logging.Formatter(LOG_FORMAT))
        logging.getLogger('').addHandler(console)

        # Persist the config for reproducibility; use a context manager so
        # the file handle is closed (the bare open() call leaked it).
        with open(os.path.join(expt_dir, "config.json"), 'w', encoding="utf-8") as fp:
            json.dump(config, fp)
        for key in config:
            logging.info("---{}: {}".format(key, config[key]))

        # Build or restore the matcher, optimizer and loss criterion.
        matcher, optimizer, loss = model_helper.load_or_build_pairwise_model(config, ckpt_path)
        logging.info("USE CUDA: {}".format(USE_CUDA))
        self.device = torch.device("cuda" if USE_CUDA else "cpu")
        self.matcher = matcher.to(self.device)
        self.criterion = loss.to(self.device)
        # NOTE(review): `optimizer` appears to be a wrapper exposing the raw
        # torch optimizer as `.optimizer` (see zero_grad/state_dict usage
        # below) while providing its own `.step()` — confirm in model_helper.
        self.optimizer = optimizer

    def train_batch(self, query_inputs: torch.Tensor, pos_inputs: torch.Tensor, neg_inputs: torch.Tensor):
        """Run one optimization step on a (query, positive, negative) batch.

        Returns:
            float: the scalar pairwise ranking loss for this batch.
        """
        # Zero gradients on the underlying torch optimizer.
        self.optimizer.optimizer.zero_grad()

        # Move the batch to the training device.
        query_inputs = query_inputs.to(self.device)
        pos_inputs = pos_inputs.to(self.device)
        neg_inputs = neg_inputs.to(self.device)
        # target = 1 means "pos_score should rank above neg_score"
        # (margin-ranking-loss convention).
        targets = torch.ones(query_inputs.size(0)).to(self.device)

        # Call the module itself rather than .forward() so that
        # nn.Module.__call__ hooks run.
        pos_score = self.matcher(query_inputs=query_inputs,
                                 candidate_inputs=pos_inputs)
        neg_score = self.matcher(query_inputs=query_inputs,
                                 candidate_inputs=neg_inputs)
        loss = self.criterion(pos_score, neg_score, targets)

        # Back-propagate and apply the update.
        loss.backward()
        self.optimizer.step()

        return loss.item()

    def evaluate(self, valid_iter):
        """Score every batch in `valid_iter` and average the ranking metrics.

        Args:
            valid_iter: iterable of (query, candidate, label) batches that
                also exposes a .groups() method describing query groups.

        Returns:
            float: mean over the metrics produced by run_rank_evaluate.
        """
        self.matcher.eval()

        predictions, labels = [], []
        with torch.no_grad():
            for batch_data in valid_iter:
                batch_query_inputs = batch_data[0].to(self.device)
                batch_candidate_inputs = batch_data[1].to(self.device)

                score = self.matcher(batch_query_inputs, batch_candidate_inputs)
                predictions += score.tolist()
                labels += batch_data[2].tolist()

        # Restore train mode before returning to the training loop.
        self.matcher.train()

        groups = valid_iter.groups()

        res_dict = run_rank_evaluate(labels, predictions, groups)
        scores = []
        for key in res_dict:
            logging.info("... {}:{:.4f}".format(key, res_dict[key]))
            scores += [res_dict[key]]
        return sum(scores) / len(scores)

    def save(self, step, valid_score, test_score):
        """Write a timestamped checkpoint (model + optimizer state) to disk.

        The filename embeds matcher name, wall-clock time, step and scores
        so checkpoints never collide and are self-describing.
        """
        date_time = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime())

        torch.save({"model": self.matcher.state_dict(),
                    "optimizer": self.optimizer.optimizer.state_dict()},
                   os.path.join(self.ckpt_path, self.config['matcher']+"_"+date_time
                                + "_step_{}_valid_{:.4f}_test_{:.4f}.tar"
                                .format(step, valid_score, test_score)))

    def train(self, train_iter, valid_iter, test_iter=None):
        """Main training loop: iterate epochs, display loss, evaluate, save.

        Args:
            train_iter: iterable yielding (query, pos, neg) training batches.
            valid_iter: evaluation iterator (see evaluate()).
            test_iter: optional second evaluation iterator; when absent the
                test score recorded in checkpoints is 0.0.
        """
        logging.info("train number of samples: {}".format(len(train_iter)))
        logging.info("valid number of samples: {}".format(len(valid_iter)))

        config = self.config
        step = 0

        print_loss = []
        for epoch in range(config['max_epoch']):
            for batch_data in train_iter:
                step += 1

                batch_query_inputs, batch_pos_inputs, batch_neg_inputs = batch_data

                loss = self.train_batch(query_inputs=batch_query_inputs,
                                        pos_inputs=batch_pos_inputs,
                                        neg_inputs=batch_neg_inputs)
                print_loss += [loss]

                if step % self._display_frequency == 0:
                    print_avg_loss = sum(print_loss) / len(print_loss)
                    logging.info("Epoch: {}; Step: {}; Loss: {:.4f}".format(epoch + 1,
                                                                            step,
                                                                            print_avg_loss))
                    # BUGFIX: reset the window so the displayed value is the
                    # average since the last display, not a running average
                    # over all of training (the old code never cleared it).
                    print_loss = []

                if step % self._checkpoint_frequency == 0:
                    logging.info("Epoch: {}; Step: {}; evaluate valid ...".format(epoch+1, step))
                    valid_score = self.evaluate(valid_iter)

                    test_score = 0.0
                    if test_iter is not None:
                        logging.info("Epoch: {}; Step: {}; evaluate test ...".format(epoch + 1, step))
                        test_score = self.evaluate(test_iter)

                    # save checkpoints
                    self.save(step, valid_score, test_score)
                    # update optimizer
                    # self.optimizer.update(valid_score)
