from .bert import modeling, tokenization
from .bert.extract_features import read_examples, convert_examples_to_features, model_fn_builder, input_fn_builder
import tensorflow as tf
from annoy import AnnoyIndex
from enum import Enum
import numpy as np


class BertRetrievalModel:
    """Sentence retrieval over a fixed corpus using BERT embeddings.

    Every corpus line is encoded with BERT, token vectors are pooled into a
    single sentence vector according to ``pooling_strategy``, and the vectors
    are stored in an Annoy index for approximate nearest-neighbour search.
    """

    def __init__(self, corpus, bert_model_ckpt, bert_config_file, bert_vocab_file, max_seq_len, pooling_strategy, pooling_layer):
        """Build the estimator, encode the corpus and build the Annoy index.

        :param corpus: iterable of tokenized sentences (each a list of strings;
            tokens are joined with '' — assumes a character-based language,
            e.g. Chinese — TODO confirm with callers)
        :param bert_model_ckpt: path to the BERT checkpoint
        :param bert_config_file: path to the BERT JSON config
        :param bert_vocab_file: path to the BERT vocab file
        :param max_seq_len: maximum sequence length for tokenization
        :param pooling_strategy: int 0-5, see :meth:`get_pooling_res`
        :param pooling_layer: comma-separated layer indexes, e.g. "-1,-2"
        """
        corpus_str = [''.join(line) for line in corpus]
        self.max_seq_len = max_seq_len
        # Remember the strategy so queries are pooled the same way as the
        # index was built (the original always queried with strategy 0, which
        # put query vectors in a different space for any other strategy and
        # broke strategy 2 outright via a dimension mismatch).
        self.pooling_strategy = pooling_strategy
        self.layer_indexes = [int(x) for x in pooling_layer.split(",")]

        print('Build bert model...')
        bert_config = modeling.BertConfig.from_json_file(bert_config_file)
        self.tokenizer = tokenization.FullTokenizer(vocab_file=bert_vocab_file, do_lower_case=False)

        is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
        run_config = tf.contrib.tpu.RunConfig(
            master=None,
            tpu_config=tf.contrib.tpu.TPUConfig(num_shards=8, per_host_input_for_training=is_per_host))

        model_fn = model_fn_builder(
            bert_config=bert_config,
            init_checkpoint=bert_model_ckpt,
            layer_indexes=self.layer_indexes,
            use_tpu=False,
            use_one_hot_embeddings=False)

        # If TPU is not available, this falls back to a normal Estimator on
        # CPU or GPU.
        self.estimator = tf.contrib.tpu.TPUEstimator(
            use_tpu=False,
            model_fn=model_fn,
            config=run_config,
            predict_batch_size=100)

        print('start to encode...')
        encoder_layer, input_mask = self._encode(corpus_str)

        # ndarray; most time is consumed here
        self.sentence_embeddings = self.get_pooling_res(encoder_layer, input_mask, pooling_strategy)
        print('sentence_embeddings shape: ', self.sentence_embeddings.shape)
        print('finish...')

        # Build the search index. The vector dimension is taken from the
        # actual embeddings instead of a hard-coded 768: concatenating
        # several layers, or pooling strategy 2, yields a wider vector.
        self.t = AnnoyIndex(self.sentence_embeddings.shape[1])
        for i in range(self.sentence_embeddings.shape[0]):
            self.t.add_item(i, self.sentence_embeddings[i, :])
        self.t.build(10)

    def _encode(self, texts):
        """Run BERT over ``texts`` and return ``(encoder_layer, input_mask)``.

        :param texts: list of raw strings
        :return: tuple of
            encoder_layer -- float ndarray (num_texts, seq_len, hidden * num_layers),
            input_mask    -- float32 ndarray (num_texts, seq_len) with 0/1 entries
        """
        examples = read_examples(texts)
        features = convert_examples_to_features(
            examples=examples, seq_length=self.max_seq_len, tokenizer=self.tokenizer)
        input_fn = input_fn_builder(features=features, seq_length=self.max_seq_len)

        batches = []
        for result in self.estimator.predict(input_fn, yield_single_examples=False):
            # The model_fn exposes one output per requested layer; concatenate
            # them along the hidden dimension.
            layers = [result["layer_output_%d" % j] for j in range(len(self.layer_indexes))]
            batches.append(np.concatenate(layers, axis=-1))
        encoder_layer = np.concatenate(batches, axis=0)
        print('encoder_layer shape: ', encoder_layer.shape)

        input_mask = np.array([f.input_mask for f in features], dtype=np.float32)
        print('input_mask shape: ', input_mask.shape)
        return encoder_layer, input_mask

    def get_pooling_res(self, encoder_layer, input_mask, pooling_strategy):
        """Pool token embeddings (B, L, D) into sentence embeddings.

        :param encoder_layer: float ndarray of shape [B, L, D]
        :param input_mask: 0-1 float ndarray of shape [B, L]
        :param pooling_strategy:
            0 -- masked mean            -> (B, D)
            1 -- masked max             -> (B, D)
            2 -- mean and max concat    -> (B, 2D)
            3 -- first ([CLS]) token    -> (B, D)
            4 -- last unmasked token    -> (B, D)
            5 -- no pooling, raw tokens -> (B, L, D)
        :raises NotImplementedError: for any other strategy value
        """
        if pooling_strategy == 0:
            pooled = self.masked_reduce_mean(encoder_layer, input_mask)
        elif pooling_strategy == 1:
            pooled = self.masked_reduce_max(encoder_layer, input_mask)
        elif pooling_strategy == 2:
            pooled = np.concatenate((self.masked_reduce_mean(encoder_layer, input_mask),
                                     self.masked_reduce_max(encoder_layer, input_mask)), axis=1)
        elif pooling_strategy == 3:
            pooled = np.squeeze(encoder_layer[:, 0:1, :], axis=1)
        elif pooling_strategy == 4:
            # Original used np.cast(...) (not callable in NumPy) and left the
            # np.gather_nd line commented out, so `pooled` was never assigned
            # and this branch raised UnboundLocalError. astype + advanced
            # indexing is the NumPy equivalent of tf.gather_nd here.
            seq_len = np.sum(input_mask, axis=1).astype(np.int32)
            rows = np.arange(seq_len.shape[0])
            pooled = encoder_layer[rows, seq_len - 1]
        elif pooling_strategy == 5:
            pooled = encoder_layer
        else:
            raise NotImplementedError()
        return pooled

    def masked_reduce_mean(self, x, mask, jitter=1e-10):
        """Mean of unmasked tokens; `jitter` guards against an all-zero mask."""
        return np.sum(self.mul_mask(x, mask), axis=1) / (np.sum(mask, axis=1, keepdims=True) + jitter)

    def masked_reduce_max(self, x, mask):
        """Max over unmasked tokens (masked positions are pushed to -inf-ish)."""
        return np.max(self.minus_mask(x, mask), axis=1)

    def minus_mask(self, x, mask, offset=1e30):
        """
        masking by subtract a very large number
        :param x: sequence data in the shape of [B, L, D]
        :param mask: 0-1 mask in the shape of [B, L]
        :param offset: very large negative number
        :return: masked x
        """
        return x - np.expand_dims(1.0 - mask, axis=-1) * offset

    def mul_mask(self, x, mask):
        """
        masking by multiply zero
        :param x: sequence data in the shape of [B, L, D]
        :param mask: 0-1 mask in the shape of [B, L]
        :return: masked x
        """
        return x * np.expand_dims(mask, axis=-1)

    def get_top_similarities(self, query, topk=10):
        """Return the corpus indices of the ``topk`` sentences nearest to ``query``.

        :param query: tokenized sentence, [word1, word2, ..., wordn]
        :param topk: number of neighbours to return
        :return: list of int corpus indices
        """
        encoder_layer, input_mask = self._encode([''.join(query)])
        # Pool with the same strategy used at build time so the query vector
        # lives in the same space (and has the same dimension) as the index.
        query_embedding = self.get_pooling_res(encoder_layer, input_mask, self.pooling_strategy)[0]
        top_ids, top_distances = self.t.get_nns_by_vector(query_embedding, n=topk, include_distances=True)
        return top_ids