import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras import activations
from tensorflow.keras.layers import Layer, Input, Embedding, LSTM, Dense, Attention
from tensorflow.keras.models import Model


class Encoder(keras.Model):
    """Encode a batch of token-id sequences into per-step vectors plus final LSTM states."""

    def __init__(self, vocab_size, embedding_dim, hidden_units):
        super().__init__()
        # Token embedding; mask_zero=True makes id 0 (the <PAD> token) a mask value.
        self.embedding = Embedding(vocab_size, embedding_dim, mask_zero=True)
        # Encoder LSTM: emits the full output sequence and the final (h, c) states.
        self.encoder_lstm = LSTM(hidden_units, return_sequences=True, return_state=True, name="encode_lstm")

    def call(self, inputs):
        """Return (sequence_outputs, final_h, final_c) for the given id tensor."""
        embedded = self.embedding(inputs)
        outputs, final_h, final_c = self.encoder_lstm(embedded)
        return outputs, final_h, final_c


class Decoder(keras.Model):
    """Decode target ids, attending over the encoder's output sequence."""

    def __init__(self, vocab_size, embedding_dim, hidden_units):
        super().__init__()
        # Token embedding; mask_zero=True makes id 0 (the <PAD> token) a mask value.
        self.embedding = Embedding(vocab_size, embedding_dim, mask_zero=True)
        # Decoder LSTM; seeded with the encoder's final states via initial_state.
        self.decoder_lstm = LSTM(hidden_units, return_sequences=True, return_state=True, name="decode_lstm")
        # Keras dot-product attention: query = decoder outputs, value = encoder outputs.
        self.attention = Attention()

    def call(self, enc_outputs, dec_inputs, states_inputs):
        """Return (attention_context, final_h, final_c) for one decoding pass."""
        embedded = self.embedding(dec_inputs)
        sequence, final_h, final_c = self.decoder_lstm(embedded, initial_state=states_inputs)
        context = self.attention([sequence, enc_outputs])
        return context, final_h, final_c

def Seq2Seq(maxlen, embedding_dim, hidden_units, vocab_size):
    """
    Build the full encoder-decoder model with attention.

    Takes two id tensors (an encoder sequence of length `maxlen` and a
    decoder sequence of arbitrary length) and produces a per-step softmax
    over the vocabulary.
    """
    # Symbolic inputs for the two id sequences.
    encoder_inputs = Input(shape=(maxlen,), name="encode_input")
    decoder_inputs = Input(shape=(None,), name="decode_input")

    # Encode the source; keep the final LSTM states to seed the decoder.
    enc_outputs, state_h, state_c = Encoder(vocab_size, embedding_dim, hidden_units)(encoder_inputs)

    # Decode with attention over the encoder outputs (final states unused here).
    attention_output, _, _ = Decoder(vocab_size, embedding_dim, hidden_units)(
        enc_outputs, decoder_inputs, [state_h, state_c]
    )

    # Project every step onto the vocabulary.
    dense_outputs = Dense(vocab_size, activation='softmax', name="dense")(attention_output)

    return Model(inputs=[encoder_inputs, decoder_inputs], outputs=dense_outputs)

def read_vocab(vocab_path):
    """Read one vocabulary word per line from `vocab_path` (UTF-8).

    Returns the words, stripped of surrounding whitespace, in file order.
    """
    with open(vocab_path, "r", encoding="utf8") as f:
        # Comprehension instead of a manual append loop (same order, same strip).
        return [line.strip() for line in f]

def read_data(data_path):
    """Read whitespace-tokenized lines from `data_path` (UTF-8).

    Returns one list of tokens per line; a blank line yields an empty list.
    """
    with open(data_path, "r", encoding="utf8") as f:
        # Comprehension instead of a manual append loop (identical tokenization).
        return [line.strip().split() for line in f]

def process_data_index(datas, vocab2id):
    """Map each tokenized sentence to a list of vocabulary ids.

    Tokens missing from `vocab2id` fall back to the "<UNK>" id.
    Raises KeyError if `vocab2id` has no "<UNK>" entry.
    """
    # Hoist the loop-invariant fallback id; dict.get replaces the
    # `w in vocab2id` membership test followed by a second lookup.
    unk_id = vocab2id["<UNK>"]
    return [[vocab2id.get(w, unk_id) for w in words] for words in datas]

def process_input_data(source_data_ids, target_indexs, vocab2id):
    """Wrap id sequences with <GO>/<EOS> markers for teacher forcing.

    Returns (encoder inputs, decoder inputs, decoder targets); the decoder
    targets are the decoder inputs shifted one step, ending in <EOS>.
    Pairs are zipped, so the shorter of the two input lists sets the length.
    """
    pairs = list(zip(source_data_ids, target_indexs))
    source_inputs = [[vocab2id["<GO>"]] + src + [vocab2id["<EOS>"]] for src, _ in pairs]
    decoder_inputs = [[vocab2id["<GO>"]] + tgt for _, tgt in pairs]
    decoder_outputs = [tgt + [vocab2id["<EOS>"]] for _, tgt in pairs]
    return source_inputs, decoder_inputs, decoder_outputs

def train(begin_ep, turns):
    """Train the seq2seq model for `turns` epochs, resuming from epoch `begin_ep`.

    When begin_ep > 0, weights are loaded from
    data/seq2seq_attention_weights_<begin_ep>.h5; after training, weights are
    saved to data/seq2seq_attention_weights_<begin_ep + turns>.h5.
    """
    # Build the vocabulary; the special tokens take ids 0..3 (<PAD> must be
    # id 0 because the Embedding layers use mask_zero=True).
    vocab_words = read_vocab("data/ch_word_vocab.txt")
    special_words = ["<PAD>", "<UNK>", "<GO>", "<EOS>"]
    vocab_words = special_words + vocab_words
    vocab2id = {word: i for i, word in enumerate(vocab_words)}
    id2vocab = {i: word for i, word in enumerate(vocab_words)}

    # Load a fixed-size sample of parallel source/target data and index it.
    num_sample = 10000
    source_data = read_data("data/ch_source_data_seg.txt")[:num_sample]
    source_data_ids = process_data_index(source_data, vocab2id)
    target_data = read_data("data/ch_target_data_seg.txt")[:num_sample]
    target_data_ids = process_data_index(target_data, vocab2id)

    print("vocab test: ", [id2vocab[i] for i in range(10)])
    print("source test: ", source_data[10])
    print("source index: ", source_data_ids[10])
    print("target test: ", target_data[10])
    print("target index: ", target_data_ids[10])

    # Add <GO>/<EOS> markers and build teacher-forcing decoder inputs/targets.
    source_input_ids, target_input_ids, target_output_ids = process_input_data(source_data_ids, target_data_ids,
                                                                               vocab2id)
    print("encoder inputs: ", source_input_ids[:2])
    print("decoder inputs: ", target_input_ids[:2])
    print("decoder outputs: ", target_output_ids[:2])

    # Pad/truncate every sequence to a fixed length.
    maxlen = 10
    source_input_ids = keras.preprocessing.sequence.pad_sequences(source_input_ids, padding='post', maxlen=maxlen)
    target_input_ids = keras.preprocessing.sequence.pad_sequences(target_input_ids, padding='post', maxlen=maxlen)
    target_output_ids = keras.preprocessing.sequence.pad_sequences(target_output_ids, padding='post', maxlen=maxlen)
    # Fixed: show the padded encoder inputs — the original printed the
    # unpadded source_data_ids, inconsistent with the two prints below.
    print(source_input_ids[:5])
    print(target_input_ids[:5])
    print(target_output_ids[:5])

    embedding_dim = 50
    hidden_units = 128
    vocab_size = len(vocab2id)
    print('vocab_size: ', vocab_size)

    model = Seq2Seq(maxlen, embedding_dim, hidden_units, vocab_size)
    if begin_ep > 0:
        # Resume from a previously saved checkpoint.
        model_file = "data/seq2seq_attention_weights_" + str(begin_ep) + ".h5"
        model.load_weights(model_file)
    model.summary()

    epochs = turns
    batch_size = 2048
    val_rate = 0.2

    # NOTE(review): this loss does not mask <PAD> positions, so padded steps
    # contribute to the training loss — confirm this is intended.
    loss_fn = keras.losses.SparseCategoricalCrossentropy()
    model.compile(loss=loss_fn, optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3))
    model.fit([source_input_ids, target_input_ids], target_output_ids,
              batch_size=batch_size, epochs=epochs, validation_split=val_rate)
    model_file = "data/seq2seq_attention_weights_" + str(epochs + begin_ep) + ".h5"
    model.save_weights(model_file)


if __name__ == "__main__":
    # Resume from the epoch-100 weight file and train for 900 further epochs
    # (train() then saves data/seq2seq_attention_weights_1000.h5).
    train(100,900)