from __future__ import print_function
from heapq import nlargest
from keras_models import *

import os
import sys
import random
import pickle

# Fix the RNG seed so runs are reproducible.
random.seed(42)
# Default the data-directory env var so Evaluator.__init__ finds it even
# when the caller has not exported SHENJI_QA themselves.
os.environ['SHENJI_QA'] = 'shenji_qa_python'


class Evaluator:
    """Loads pickled QA resources from the SHENJI_QA data directory and
    runs answer selection with a trained Keras QA model.

    The data directory is taken from the ``SHENJI_QA`` environment
    variable and is expected to contain pickled files named ``answers``,
    ``vocabulary``, ``dev`` and ``test1``.
    """

    def __init__(self, conf=None):
        """Resolve the data path and load the answer set.

        :param conf: optional configuration dict; may contain
            ``training_params``, ``question_len`` and ``answer_len``.
        """
        try:
            data_path = os.environ['SHENJI_QA']
        except KeyError:
            print("SHENJI_QA is not set.  Set it to your path of shenji_qa_python")
            sys.exit(1)
        self.path = data_path
        self.conf = dict() if conf is None else conf
        # Read from self.conf, not conf: conf may legitimately be None here,
        # and the original `conf.get(...)` crashed in that case.
        self.params = self.conf.get('training_params', dict())
        self.answers = self.load('answers')
        self._vocab = None
        self._reverse_vocab = None
        self._eval_sets = None

    ##### Resources #####

    def load(self, name):
        """Unpickle and return the resource file ``name`` from the data path.

        NOTE(review): ``pickle.load`` executes arbitrary code when the file
        is untrusted — assumes the data directory contents are trusted.
        """
        # Context manager so the file handle is always closed.
        with open(os.path.join(self.path, name), 'rb') as f:
            return pickle.load(f)

    def vocab(self):
        """Return the index -> word vocabulary, loading it lazily."""
        if self._vocab is None:
            self._vocab = self.load('vocabulary')
        return self._vocab

    def reverse_vocab(self):
        """Return the lower-cased word -> index mapping, built lazily."""
        if self._reverse_vocab is None:
            vocab = self.vocab()
            self._reverse_vocab = dict((v.lower(), k) for k, v in vocab.items())
        return self._reverse_vocab

    ##### Loading / saving #####

    def load_epoch(self, model, epoch):
        """Load the weights saved for ``epoch`` into ``model``.

        :raises IOError: if no weights file exists for that epoch.
            (Explicit raise instead of ``assert``, which is stripped
            under ``python -O``.)
        """
        weights_path = 'shenji_qa_models/weights_epoch_%d.h5' % epoch
        if not os.path.exists(weights_path):
            raise IOError('Weights at epoch %d not found' % epoch)
        model.load_weights(weights_path)

    ##### Converting / reverting #####

    def convert(self, words):
        """Map words to vocabulary indices (0 for out-of-vocabulary words).

        ``words`` may be a tab-separated string (split after lower-casing)
        or an iterable of already-tokenized words.
        """
        rvocab = self.reverse_vocab()
        if isinstance(words, str):
            words = words.strip().lower().split('\t')
        return [rvocab.get(w, 0) for w in words]

    def revert(self, indices):
        """Map vocabulary indices back to words ('X' for unknown indices)."""
        vocab = self.vocab()
        return [vocab.get(i, 'X') for i in indices]

    ##### Padding #####

    def padq(self, data):
        """Pad/truncate question sequences to ``conf['question_len']``."""
        return self.pad(data, self.conf.get('question_len', None))

    def pada(self, data):
        """Pad/truncate answer sequences to ``conf['answer_len']``."""
        return self.pad(data, self.conf.get('answer_len', None))

    def pad(self, data, len=None):
        """Post-pad/post-truncate ``data`` with zeros to length ``len``.

        The parameter name ``len`` (shadowing the builtin) is kept for
        backward compatibility with keyword callers.
        """
        from keras.preprocessing.sequence import pad_sequences
        return pad_sequences(data, maxlen=len, padding='post', truncating='post', value=0)

    def eval_sets(self):
        """Return the evaluation splits ('dev', 'test1'), loaded lazily."""
        if self._eval_sets is None:
            self._eval_sets = dict([(s, self.load(s)) for s in ['dev', 'test1']])
        return self._eval_sets

    def answer_select(self, model, question, answer_candidates):
        """Score candidates against ``question`` and return the top two.

        :param model: compiled QA model whose ``predict`` takes
            ``[questions, answers]`` and returns similarity scores.
        :returns: list of [question, best answer, second-best answer]
            with tab characters stripped.
        """
        output_qa = list()
        indices = list()

        output_qa.append(question.replace('\t', ''))
        for answer_candidate in answer_candidates:
            indices.append(self.convert(answer_candidate))
        answers = self.pada(indices)
        question = self.padq([self.convert(question)] * len(indices))

        sims = model.predict([question, answers], batch_size=10).flatten()
        # `range`, not `xrange`: the latter does not exist on Python 3.
        top_answers = nlargest(2, range(len(answer_candidates)), key=lambda i: sims[i])
        for top_answer in top_answers:
            output_qa.append(''.join(answer_candidates[top_answer]).replace('\t', ''))

        return output_qa


if __name__ == '__main__':
    # Model/training configuration; commented-out entries document the
    # knobs that were used during training but are not needed here.
    conf = {
        'question_len': 128,
        'answer_len': 128,
        'n_words': 5809,  # len(vocabulary) + 1
        'margin': 0.05,

        'training_params': {
            # 'save_every': 1,
            # 'eval_every': 1,
            # 'batch_size': 128,
            # 'nb_epoch': 200,
            # 'validation_split': 0.2,
            'optimizer': 'adam',
            # 'optimizer': Adam(clip_norm=0.1),
            # 'n_eval': 100,

        },

        'model_params': {
            'n_embed_dims': 100,
            'n_hidden': 200,

            # convolution
            'nb_filters': 500,  # * 4
            'conv_activation': 'relu',

            # recurrent
            'n_lstm_dims': 141,  # * 2

            # 'initial_embed_weights': np.load('shenji_qa_word2vec_100_dim.embeddings'),
        },

        'similarity_params': {
            'mode': 'cosine',
            'gamma': 1,
            'c': 1,
            'd': 2,
        }
    }

    # Fail with a usage message instead of a raw IndexError when the
    # question and at least one candidate answer are missing.
    if len(sys.argv) < 3:
        print('Usage: %s <question> <answer_candidate> [<answer_candidate> ...]' % sys.argv[0])
        sys.exit(1)

    evaluator = Evaluator(conf)

    model = ConvolutionModel(conf)
    optimizer = conf.get('training_params', dict()).get('optimizer', 'adam')
    model.compile(optimizer=optimizer)

    qa_question = sys.argv[1]
    qa_answers = sys.argv[2:]

    # evaluate mrr for a particular epoch after finishing the training
    evaluator.load_epoch(model, 200)
    target_answers = evaluator.answer_select(model, qa_question, qa_answers)
    for target_answer in target_answers:
        print(target_answer)
