import os
import numpy as np

from tensorflow.python.training.training_util import _get_or_create_global_step_read as get_global_step
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.basic_session_run_hooks import SecondOrStepTimer
from tensorflow.python.training.session_run_hook import SessionRunArgs, SessionRunHook

from run_classifier import input_fn_builder
from utils import PRF, eval_reranker, print_metrics


class EvalHook(SessionRunHook):
    """SessionRunHook that periodically evaluates a reranking model during training.

    Every `eval_steps` global steps it runs the estimator's predict loop over the
    dev set, computes MAP/MRR (plus PRF metrics), and — when MAP (in percent)
    exceeds the threshold `th` — copies the matching checkpoint files from the
    estimator's output directory into `checkpoint_dir` under a name that encodes
    the step and the scores.
    """

    def __init__(self, estimator, dev_features, dev_label, dev_cid, max_seq_length, th=82.0, eval_steps=None,
                 checkpoint_dir=None, model_name=None, _input_fn_builder=None, tail_num=0, type_word=''):
        """Creates the hook.

        Args:
            estimator: a tf.estimator.Estimator used for `predict` on the dev set.
            dev_features: pre-built dev features, passed to the input_fn builder.
            dev_label: dev gold labels (array-like of ints).
            dev_cid: dev candidate ids, formatted "qid-aid" (split on '-' below).
            max_seq_length: sequence length forwarded to the input_fn builder.
            th: MAP threshold *in percent* (evaluation() returns MAP * 100);
                checkpoints are only saved when MAP exceeds this.
            eval_steps: evaluate every this many global steps.
            checkpoint_dir: directory where qualifying checkpoints are copied to.
            model_name: used to build log-file and source-directory names.
            _input_fn_builder: optional override for the default input_fn_builder.
            tail_num: suffix used in the source checkpoint directory name.
            type_word: infix used in directory/log names.
        """
        logging.info("Create EvalHook.")
        self.estimator = estimator
        self.dev_features = dev_features
        self.dev_label = dev_label
        self.dev_cid = dev_cid
        self.max_seq_length = max_seq_length
        self.th = th
        self._checkpoint_dir = checkpoint_dir
        if os.path.exists('./EVAL_LOG') is False:
            os.mkdir('./EVAL_LOG')
        self.model_name = model_name
        self.tail_num = tail_num
        # Directory the estimator writes its checkpoints to — presumably this
        # naming scheme matches the estimator's model_dir set elsewhere; confirm
        # against the training script (TODO).
        self.org_dir = "CQA_" + type_word + self.model_name + "_{}".format(self.tail_num)

        self._log_save_path = os.path.join('./EVAL_LOG', model_name + '_' + type_word + '_log')
        self._save_path = checkpoint_dir
        if os.path.exists(self._save_path) is False:
            os.mkdir(self._save_path)
        self._timer = SecondOrStepTimer(every_steps=eval_steps)
        self._steps_per_run = 1
        self._global_step_tensor = None
        # NOTE(review): _saver is never assigned a real Saver in this file;
        # checkpoints are copied byte-for-byte in _save() instead.
        self._saver = None

        if _input_fn_builder is not None:
            self.input_fn_builder = _input_fn_builder
        else:
            self.input_fn_builder = input_fn_builder

    def _set_steps_per_run(self, steps_per_run):
        # Mirrors CheckpointSaverHook's private API so distributed/multi-step
        # training loops can tell the hook how many steps one run() advances.
        self._steps_per_run = steps_per_run

    def begin(self):
        """Caches the global-step tensor; required before the session runs."""
        self._global_step_tensor = get_global_step()  # pylint: disable=protected-access
        if self._global_step_tensor is None:
            raise RuntimeError(
                "Global step should be created to use EvalHook.")

    def before_run(self, run_context):  # pylint: disable=unused-argument
        # Request the (possibly stale) global step with every run call.
        return SessionRunArgs(self._global_step_tensor)

    def after_run(self, run_context, run_values):
        """Triggers evaluation when the timer fires; saves checkpoint if MAP > th."""
        stale_global_step = run_values.results
        # The fetched step is the value *before* the train op ran, hence the
        # "+ steps_per_run" pre-check followed by a re-read of the real value.
        if self._timer.should_trigger_for_step(
                stale_global_step + self._steps_per_run):
            # get the real value after train op.
            global_step = run_context.session.run(self._global_step_tensor)
            if self._timer.should_trigger_for_step(global_step):
                self._timer.update_last_triggered_step(global_step)
                MAP, MRR = self.evaluation(global_step)
                # print("================", MAP, MRR, self.th, type(MAP), type(MRR), type(self.th))
                if MAP > self.th:
                    # print("================", MAP, MRR)
                    self._save(run_context.session, global_step, MAP, MRR)

    def end(self, session):
        """Runs one final evaluation at the last step unless it just happened."""
        last_step = session.run(self._global_step_tensor)
        if last_step != self._timer.last_triggered_step():
            MAP, MRR = self.evaluation(last_step)
            # print("================", MAP, MRR, self.th, type(MAP), type(MRR), type(self.th))
            if MAP > self.th:
                # print("================", MAP, MRR)
                self._save(session, last_step, MAP, MRR)

    def evaluation(self, global_step):
        """Predicts on the dev set and computes ranking metrics.

        Returns:
            (MAP * 100, MRR) — note MAP is scaled to percent so it can be
            compared against `self.th`.
        """
        eval_input_fn = self.input_fn_builder(
            features=self.dev_features,
            seq_length=self.max_seq_length,
            is_training=False,
            drop_remainder=False)

        # yield_single_examples=False => each prediction dict holds a batch;
        # "prob" is assumed to be (batch, num_classes) — concatenated below.
        predictions = self.estimator.predict(eval_input_fn, yield_single_examples=False)
        res = np.concatenate([a["prob"] for a in predictions], axis=0)

        metrics = PRF(np.array(self.dev_label), res.argmax(axis=-1))

        print('\n Global step is : ', global_step)
        # res[:, 1] is the positive-class probability used as the ranking score.
        MAP, AvgRec, MRR = eval_reranker(self.dev_cid, self.dev_label, res[:, 1], reranking_th=-1e10)

        metrics['MAP'] = MAP
        metrics['AvgRec'] = AvgRec
        metrics['MRR'] = MRR

        metrics['global_step'] = global_step

        print_metrics(metrics, 'dev', save_dir=self._save_path)

        # Build per-candidate scored samples and dump a WikiQA-style rank file.
        # NOTE(review): output/gold paths are hard-coded; the gold file is the
        # *test* gold even though this is the dev set — confirm intentional.
        scored_samples = []
        for index, label, score in zip(self.dev_cid, self.dev_label, res[:, 1]):
            scored_samples.append(QaSample(q_id=index.split('-')[0],
                                           a_id=index.split('-')[1],
                                           label=label,
                                           score=score))
        with open('data/output/WikiQA-dev.rank', 'w') as fout:
            for sample, rank in get_final_rank(scored_samples):
                fout.write('{}\t{}\t{}\n'.format(sample.q_id, sample.a_id, rank))
        dev_MAP, dev_MRR = eval_map_mrr('data/output/WikiQA-dev.rank', 'data/raw/WikiQA-test-gold.tsv')
        print('Dev MAP: {}, MRR: {}'.format(dev_MAP, dev_MRR))
        return MAP * 100, MRR

    def _save(self, session, step, map=None, mrr=None):
        """Saves the latest checkpoint, returns should_stop."""
        save_path = os.path.join(self._save_path, "step{}_MAP{:5.4f}_MRR{:5.4f}".format(step, map, mrr))

        # Copy the checkpoint files the estimator already wrote, renaming them
        # to encode step/MAP/MRR. "step-1" — presumably the global step seen
        # here is one ahead of the step number in the checkpoint filename;
        # TODO confirm against the estimator's save cadence.
        list_name = os.listdir(self.org_dir)
        for name in list_name:
            if "model.ckpt-{}".format(step-1) in name:
                org_name = os.path.join(self.org_dir, name)
                # Keep only the last extension (e.g. ".index", ".meta",
                # ".data-00000-of-00001") on the renamed copy.
                tag_name = save_path + "." + name.split(".")[-1]
                print("save {} to {}".format(org_name, tag_name))
                with open(org_name, "rb") as fr, open(tag_name, 'wb') as fw:
                    fw.write(fr.read())

class QaSample(object):
    """One (question, answer) candidate pairing a gold label with a model score.

    Attributes:
        q_id: question identifier.
        a_id: answer identifier.
        label: gold relevance label, coerced to int. NOTE: the default of
            ``None`` would raise TypeError in ``int(None)`` — callers must
            always supply a label; the default only documents "optional-ish"
            intent from an earlier signature.
        score: model confidence, coerced to float (defaults to 0.0).
    """

    def __init__(self, q_id, a_id, label=None, score=0):
        self.q_id = q_id
        self.a_id = a_id
        self.label = int(label)
        self.score = float(score)

    def __repr__(self):
        # Added for debuggability; instances are also used as dict keys in
        # get_final_rank(), which relies on default identity hashing — do not
        # add __eq__/__hash__ here.
        return "QaSample(q_id={!r}, a_id={!r}, label={!r}, score={!r})".format(
            self.q_id, self.a_id, self.label, self.score)


def get_final_rank(scored_samples):
    """Assigns within-question ranks (0 = highest score) to scored samples.

    Samples are grouped by *consecutive* runs of equal ``q_id`` (the input is
    expected to be ordered by question). Within each group, rank is the
    position after sorting by descending score; the output preserves the
    original input order of samples inside the group.

    Args:
        scored_samples: iterable of objects with ``q_id`` and ``score``
            attributes (e.g. QaSample). Objects must be hashable (identity
            hashing is fine).

    Returns:
        List of (sample, rank) tuples, one per input sample.
    """
    def _flush(group, out):
        # Rank the group's samples by descending score, then emit them in
        # their original order paired with their rank.
        by_score = sorted(group, key=lambda s: s.score, reverse=True)
        rank_of = {s: r for r, s in enumerate(by_score)}
        for s in group:
            out.append((s, rank_of[s]))

    sample_final_rank = []
    same_q_samples = []
    for sample in scored_samples:
        if not same_q_samples or sample.q_id == same_q_samples[0].q_id:
            same_q_samples.append(sample)
        else:
            # Question changed: rank the finished group, start a new one.
            _flush(same_q_samples, sample_final_rank)
            same_q_samples = [sample]
    if same_q_samples:
        _flush(same_q_samples, sample_final_rank)
    return sample_final_rank


def eval_map_mrr(answer_file, gold_file):
    """Computes MAP and MRR from a rank file against a WikiQA-style gold TSV.

    Args:
        answer_file: TSV with lines ``q_id\\ta_id\\trank`` (smaller rank = better).
        gold_file: WikiQA gold TSV; a header row starting with 'QuestionID' is
            skipped, column 4 holds "qid-aid", column 6 the 0/1 gold label.

    Returns:
        (MAP, MRR) averaged over all questions in the gold file. Questions
        with no correct answer contribute 0 to both sums (the original code
        raised ZeroDivisionError on such questions).
    """
    # q_id -> {a_id: [gold_label_str, rank]}; rank defaults to -1 (unranked).
    dic = {}
    with open(gold_file, encoding='utf-8') as fin:
        for line in fin:
            line = line.strip()
            if not line:
                continue
            cols = line.split('\t')
            if cols[0] == 'QuestionID':
                continue  # header row
            q_id, _, a_id = cols[4].partition("-")
            dic.setdefault(q_id, {})[a_id] = [cols[6], -1]

    with open(answer_file, encoding='utf-8') as fin:
        for line in fin:
            line = line.strip()
            if not line:
                continue
            cols = line.split('\t')
            dic[cols[0]][cols[1]][1] = int(cols[2])

    MAP = 0.0
    MRR = 0.0
    for q_id in dic:
        # Order candidates by their predicted rank (ascending; -1 = unranked
        # sorts first, as in the original implementation).
        sort_rank = sorted(dic[q_id].items(), key=lambda item: item[1][1])
        correct = 0
        total = 0
        AP = 0.0
        mrr_mark = False
        for i, (_a_id, (label, _rank)) in enumerate(sort_rank):
            # MRR: reciprocal rank of the first correct answer.
            if label == '1' and not mrr_mark:
                MRR += 1.0 / float(i + 1)
                mrr_mark = True
            # MAP: precision at each correct answer, averaged below.
            total += 1
            if label == '1':
                correct += 1
                AP += float(correct) / float(total)
        if correct > 0:  # guard: all-negative questions previously crashed here
            MAP += AP / float(correct)

    if dic:  # guard against an empty gold file
        MAP /= float(len(dic))
        MRR /= float(len(dic))
    return MAP, MRR
