#coding:utf-8
import os
import pickle

import codecs
import collections
from tqdm import tqdm
import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras import activations
from tensorflow.keras.layers import Layer, Input, Embedding, LSTM, Dense, Attention
from tensorflow.keras.models import Model

from . import trainModel
# import trainModel

import sys

def encoder_infer(model):
    """Build a standalone inference model around the trained 'encoder' layer.

    Reuses the encoder layer's first connection node so the returned model
    maps source token ids to the encoder outputs (sequence + final states).
    """
    encoder_layer = model.get_layer('encoder')
    return Model(inputs=encoder_layer.get_input_at(0),
                 outputs=encoder_layer.get_output_at(0))


def decoder_infer(model, encoder_model):
    """Build a standalone single-step decoder model for inference.

    The returned model takes [encoder output sequence, current decoder
    token(s), previous LSTM h state, previous LSTM c state] and produces
    [vocabulary scores, new h state, new c state].
    """
    # Read sequence length and hidden size off the trained encoder's
    # sequence output tensor.
    enc_seq_output = encoder_model.get_layer('encoder').output[0]
    maxlen, hidden_units = enc_seq_output.shape[1:]

    # Fresh symbolic inputs for step-wise decoding; names must match what
    # inference-time predict() feeds.
    enc_output = Input(shape=(maxlen, hidden_units), name='enc_output')
    state_h_in = Input(shape=(hidden_units,), name='input_state_h')
    state_c_in = Input(shape=(hidden_units,), name='input_state_c')
    states_in = [state_h_in, state_c_in]
    token_input = model.get_layer('decode_input').input

    step_output, state_h_out, state_c_out = model.get_layer('decoder')(
        enc_output, token_input, states_in)
    scores = model.get_layer('dense')(step_output)

    return Model(inputs=[enc_output, token_input, states_in],
                 outputs=[scores, state_h_out, state_c_out])

class DecodeModel(object):
    """Sequence-to-sequence generation (inference) model.

    Wraps a trained encoder-decoder Keras model and performs greedy,
    token-by-token decoding of a reply for an input text.
    """
    def __init__(self, model):
        # model: the trained full seq2seq Keras model (must contain layers
        # named 'encoder', 'decode_input', 'decoder' and 'dense').
        self.saver = None
        self.vocab_size = 0
        self.id_to_char = {}
        self.char_to_id = {}
        # Default special-token ids; at inference time the "<PAD>"/"<GO>"/
        # "<EOS>" entries of the loaded dictionary are used instead.
        self.pad, self.go, self.eos = 0, 1, 2
        # Fixed sequence length the encoder was trained with.
        self.maxlen = 10
        self.model = model

    def load_dictionary(self, pkl_file_path):
        """Load the vocabulary mappings from a pickle file.

        The pickle must hold a dict with keys "id_to_char", "char_to_id"
        and "vocab_size".  (NOTE(review): original docstring said "dump to
        file", but this method *loads*.)
        """
        with codecs.open(pkl_file_path, "rb") as f_in:
            dictionary_info = pickle.load(f_in)
        self.id_to_char = dictionary_info["id_to_char"]
        self.char_to_id = dictionary_info["char_to_id"]
        self.vocab_size = dictionary_info["vocab_size"]

    def build_model(self):
        """Derive the standalone encoder/decoder inference sub-models."""
        self.encoder_model = encoder_infer(self.model)
        print(self.encoder_model.summary())
        self.decoder_model = decoder_infer(self.model, self.encoder_model)
        print(self.decoder_model.summary())

    def load_parameters(self, session, model_dir):
        """Load model weights.

        Not implemented: weights are expected to already be loaded into the
        model passed to ``__init__``.
        """
        pass

    def inference(self, input_text):
        """Greedily decode a reply for ``input_text``.

        Returns a ``(result_id, result_text)`` pair: the predicted token
        ids and the corresponding tokens (a terminating "<EOS>" token, if
        produced, is included).  Raises KeyError if ``input_text`` contains
        characters missing from the vocabulary.
        """
        char_to_id = self.char_to_id
        id_to_char = self.id_to_char
        # Truncate so that <GO> + text + <EOS> never exceeds maxlen; the
        # previous code only padded, so over-long inputs produced sequences
        # longer than the fixed length the encoder expects.
        text_words = input_text[:self.maxlen - 2]
        input_id = [char_to_id[w] for w in text_words]
        input_id = [char_to_id["<GO>"]] + input_id + [char_to_id["<EOS>"]]
        if len(input_id) < self.maxlen:
            input_id = input_id + [char_to_id["<PAD>"]] * (self.maxlen - len(input_id))

        input_source = np.array([input_id])
        input_target = np.array([char_to_id["<GO>"]])

        # Encoder pass: output sequence plus final LSTM (h, c) states.
        enc_outputs, enc_state_h, enc_state_c = self.encoder_model.predict([input_source])
        dec_inputs = input_target
        dec_states_inputs = [enc_state_h, enc_state_c]

        result_id = []
        result_text = []
        for _ in range(self.maxlen):
            # One decoder step: next-token score distribution + new states.
            dense_outputs, dec_state_h, dec_state_c = self.decoder_model.predict(
                [enc_outputs, dec_inputs] + dec_states_inputs)
            pred_id = np.argmax(dense_outputs[0][0])
            result_id.append(pred_id)
            result_text.append(id_to_char[pred_id])
            if id_to_char[pred_id] == "<EOS>":
                break
            # Feed the predicted token and updated states into the next step.
            dec_inputs = np.array([[pred_id]])
            dec_states_inputs = [dec_state_h, dec_state_c]
        return result_id, result_text

def query_answer(batch_query):
    """Answer one query string.

    Loads the trained model and vocabulary from the fixed install directory,
    builds the inference sub-models, then greedily decodes a reply and
    returns its token list.
    """
    print('batch_query:', batch_query)
    # Quiet TensorFlow's C++ logging.
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    basedir = "D:\\workspace\\zhinengkefu\\infoserver\\"
    dictionary_file = os.path.join(basedir, "model", "dict.pkl")

    trainer = trainModel.TrainModel()
    trainer.load_dictionary(dictionary_file)
    trainer.load_parameters()

    decoder = DecodeModel(trainer.model)
    decoder.load_dictionary(dictionary_file)
    decoder.build_model()

    _, answer_tokens = decoder.inference(batch_query)
    return answer_tokens

if __name__ == "__main__":
    # Manual smoke test: decode a reply for a sample greeting using the
    # model artifacts under ./model.
    dictionary_file = os.path.join(".", "model", "dict.pkl")
    input_text = "早上好"

    trainer = trainModel.TrainModel()
    trainer.load_dictionary(dictionary_file)
    trainer.load_parameters()

    decoder = DecodeModel(trainer.model)
    decoder.load_dictionary(dictionary_file)
    decoder.build_model()
    print(decoder.inference(input_text))