#coding:utf-8
import os
import pickle

import codecs
import tensorflow as tf
# from tensorflow.contrib import rnn
# from tensorflow.contrib import seq2seq
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest

import os

import codecs
import collections
from tqdm import tqdm
import numpy as np

from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras import activations
from tensorflow.keras.layers import Layer, Input, Embedding, LSTM, Dense, Attention
from tensorflow.keras.models import Model


class Encoder(keras.Model):
    """Embed source token ids and encode them with a single LSTM."""

    def __init__(self, vocab_size, embedding_dim, hidden_units):
        super().__init__()
        # Token-id -> dense vector lookup; id 0 is masked as padding.
        self.embedding = Embedding(vocab_size, embedding_dim, mask_zero=True)
        # Emits the full hidden-state sequence plus the final (h, c) pair.
        self.encoder_lstm = LSTM(hidden_units, return_sequences=True,
                                 return_state=True, name="encode_lstm")

    def call(self, inputs):
        embedded = self.embedding(inputs)
        sequence_out, final_h, final_c = self.encoder_lstm(embedded)
        return sequence_out, final_h, final_c


class Decoder(keras.Model):
    """Decode target tokens with an LSTM and attend over encoder outputs."""

    def __init__(self, vocab_size, embedding_dim, hidden_units):
        super().__init__()
        # Target-token embedding; id 0 masks padding downstream.
        self.embedding = Embedding(vocab_size, embedding_dim, mask_zero=True)
        # Seeded with the encoder's final states via initial_state at call time.
        self.decoder_lstm = LSTM(hidden_units, return_sequences=True,
                                 return_state=True, name="decode_lstm")
        # Keras Attention layer, called as [query, value].
        self.attention = Attention()

    def call(self, enc_outputs, dec_inputs, states_inputs):
        embedded = self.embedding(dec_inputs)
        sequence_out, next_h, next_c = self.decoder_lstm(
            embedded, initial_state=states_inputs)
        # Decoder states are the query; encoder states are the value.
        context = self.attention([sequence_out, enc_outputs])
        return context, next_h, next_c


def Seq2Seq(maxlen, embedding_dim, hidden_units, vocab_size):
    """
    Assemble the full encoder/decoder training model.

    Takes a fixed-length (maxlen) padded source sequence and a
    variable-length decoder input sequence; returns a Keras Model whose
    output is a per-timestep softmax over the vocabulary.
    """
    source_ids = Input(shape=(maxlen,), name="encode_input")
    target_ids = Input(shape=(None,), name="decode_input")

    # Encode the source and hand the final LSTM states to the decoder.
    encoder = Encoder(vocab_size, embedding_dim, hidden_units)
    enc_seq, enc_h, enc_c = encoder(source_ids)

    # Decode while attending over the encoder's full output sequence.
    decoder = Decoder(vocab_size, embedding_dim, hidden_units)
    context, _, _ = decoder(enc_seq, target_ids, [enc_h, enc_c])

    # Project each decoder timestep onto the vocabulary.
    probs = Dense(vocab_size, activation='softmax', name="dense")(context)

    return Model(inputs=[source_ids, target_ids], outputs=probs)


class TrainModel(object):
    """
    Sequence-to-sequence generation model: loads the corpus, builds a
    char-level vocabulary, vectorizes/pads the data, and trains the
    Seq2Seq attention model.
    """

    def __init__(self):
        self.saver = None
        self.vocab_size = 0
        self.id_to_char = {}
        self.char_to_id = {}
        # Reserved special-token ids: <PAD>=0, <GO>=1, <EOS>=2.
        self.pad, self.go, self.eos = 0, 1, 2
        self.maxlen = 10
        self.embedding_dim = 50
        self.hidden_units = 128

    def process_input_data(self, source_data_ids, target_indexs, vocab2id):
        """
        Frame id sequences with the special tokens the seq2seq model expects.

        Returns (source_inputs, decoder_inputs, decoder_outputs):
        each source is wrapped as <GO> ... <EOS>, each decoder input is
        prefixed with <GO>, and each decoder target is suffixed with <EOS>.
        """
        source_inputs = []
        decoder_inputs, decoder_outputs = [], []
        for source, target in zip(source_data_ids, target_indexs):
            source_inputs.append([vocab2id["<GO>"]] + source + [vocab2id["<EOS>"]])
            decoder_inputs.append([vocab2id["<GO>"]] + target)
            decoder_outputs.append(target + [vocab2id["<EOS>"]])
        return source_inputs, decoder_inputs, decoder_outputs

    def _read_pairs(self, data_file):
        """Read tab-separated (query, answer) pairs from *data_file*."""
        querys, answers = [], []
        with codecs.open(data_file, "r", "utf-8") as f_in:
            for line in tqdm(f_in, desc="load data from " + data_file):
                line_array = [section.strip() for section in line.strip().split("\t")]
                if len(line_array) != 2:
                    # Was printing a bytes repr via .encode(); print the text itself.
                    print("data_format error: %s" % line.strip())
                    continue
                querys.append(line_array[0])
                answers.append(line_array[1])
        return querys, answers

    def _build_vocab(self, querys, answers):
        """
        Build the char-level vocabulary: most-frequent characters get the
        lowest ids after the special tokens (<PAD>=0, <GO>=1, <EOS>=2).
        """
        counter = collections.Counter()
        for query in tqdm(querys, desc="query"):
            counter.update(query)
        for answer in tqdm(answers, desc="answer"):
            counter.update(answer)
        special_words = ["<PAD>", "<GO>", "<EOS>"]
        # most_common() sorts by descending frequency and, unlike the old
        # zip(*sorted(...)), does not crash on an empty corpus.
        all_chars = special_words + [char for char, _ in counter.most_common()]
        char_to_id = {char: idx for idx, char in enumerate(all_chars)}
        id_to_char = {idx: char for idx, char in enumerate(all_chars)}
        return char_to_id, id_to_char

    def load_train_data(self, passengerserverfile):
        '''
        Load the training corpus, build the vocabulary, vectorize and pad
        the data, and persist the dictionary to model/dict.pkl.
        '''
        querys, answers = self._read_pairs(passengerserverfile)

        self.char_to_id, self.id_to_char = self._build_vocab(querys, answers)
        self.vocab_size = len(self.char_to_id)

        # Vectorize: map every character to its vocabulary id.
        char_to_id = self.char_to_id
        querys = [[char_to_id[char] for char in query]
                  for query in tqdm(querys, desc="vectorize query")]
        answers = [[char_to_id[char] for char in answer]
                   for answer in tqdm(answers, desc="vectorize answer")]

        # Shuffle with one permutation so (query, answer) pairs stay aligned.
        permutation = np.random.permutation(len(querys))
        self.querys = [querys[index] for index in permutation]
        self.answers = [answers[index] for index in permutation]

        source_input_ids, target_input_ids, target_output_ids = self.process_input_data(
            self.querys, self.answers, self.char_to_id)

        # Pad/truncate every sequence to a fixed length for batching.
        pad_sequences = keras.preprocessing.sequence.pad_sequences
        self.source_input_ids = pad_sequences(source_input_ids, padding='post',
                                              maxlen=self.maxlen)
        self.target_input_ids = pad_sequences(target_input_ids, padding='post',
                                              maxlen=self.maxlen)
        self.target_output_ids = pad_sequences(target_output_ids, padding='post',
                                               maxlen=self.maxlen)

        # Persist the vocabulary so inference can reload it later.
        dictionary_file = os.path.join(".", "model", "dict.pkl")
        os.makedirs(os.path.dirname(dictionary_file), exist_ok=True)
        dictionary_info = {
            "id_to_char": self.id_to_char,
            "char_to_id": self.char_to_id,
            "vocab_size": self.vocab_size,
        }
        with open(dictionary_file, "wb") as f_out:
            pickle.dump(dictionary_info, f_out)

    def load_dictionary(self, pkl_file_path):
        '''
        Load a previously pickled vocabulary (id_to_char / char_to_id /
        vocab_size) from *pkl_file_path*.  (Old docstring wrongly said it
        dumped to file.)
        '''
        with open(pkl_file_path, "rb") as f_in:
            dictionary_info = pickle.load(f_in)
        self.id_to_char = dictionary_info["id_to_char"]
        self.char_to_id = dictionary_info["char_to_id"]
        self.vocab_size = dictionary_info["vocab_size"]

    def build_model(self):
        '''
        Build a fresh Seq2Seq model from the current hyperparameters.
        '''
        K.clear_session()
        self.model = Seq2Seq(self.maxlen, self.embedding_dim, self.hidden_units,
                             self.vocab_size)
        self.model.summary()

    def save_parameters(self):
        '''
        Save the model weights.
        '''
        self.model.save_weights("model/seq2seq_attention_weights.h5")

    def load_parameters(self):
        '''
        Rebuild the model and load previously saved weights.
        '''
        self.model = Seq2Seq(self.maxlen, self.embedding_dim, self.hidden_units,
                             self.vocab_size)
        self.model.load_weights("model/seq2seq_attention_weights.h5")
        print(self.model.summary())

    def train(self, model_dir, model_name, epochs, batch_size, step_to_save):
        '''
        Train the model.  *epochs* and *batch_size* are now honored (the
        old code silently overwrote them with hard-coded 20 and 32).
        *model_dir*, *model_name* and *step_to_save* are kept for
        interface compatibility but are unused here.
        '''
        val_rate = 0.2
        loss_fn = keras.losses.SparseCategoricalCrossentropy()
        self.model.compile(loss=loss_fn, optimizer='adam')
        self.model.fit([self.source_input_ids, self.target_input_ids],
                       self.target_output_ids,
                       batch_size=batch_size, epochs=epochs,
                       validation_split=val_rate)


def main():
    """Train the seq2seq model end-to-end and save its weights."""
    model_dir = os.path.join(".", "model")
    model_name = "youfan_model.ckpt"
    data_file = os.path.join(".", "data", "passengerinfo.txt")
    train_model = TrainModel()
    train_model.load_train_data(data_file)
    train_model.build_model()
    train_model.train(model_dir, model_name, 10, 10, 10)
    train_model.save_parameters()


if __name__ == "__main__":
    main()
