# -*- coding: utf-8 -*-
"""
Created on Mon Mar  11 17:18:52 2019

@author: william

Email: hua_yan_tsn@163.com
"""
import tensorflow as tf
layers = tf.keras.layers
K = tf.keras.backend
from loadData import *
from PositionEmbeddings import getPositionEmbeddings
from WordEmbeddings import laptop_model
from Evaluate import Evaluate
import numpy as np
MAX_LEN = 82
ASPECT_MAX_LEN = 9
CLASS_NUM = 3
WORD_EMBEDDINGS_DIM = 300
POSITION_EMBEDDINGS_DIM = 50
WORD_NUMS = 4582
def reduce_dimension(x, length, mask):
    """Flatten a (?, length, 1) score tensor to (?, length) and renormalize.

    Softmax is applied over all positions, masked entries are zeroed, and
    each row is rescaled so the surviving weights sum to one again.
    """
    scores = K.softmax(K.reshape(x, [-1, length]))          # (?, length)
    scores = scores * K.cast(mask, dtype='float32')         # zero out padding
    row_total = K.sum(scores, axis=1, keepdims=True)        # (?, 1)
    row_total = K.repeat_elements(row_total, rep=length, axis=1)
    return scores / row_total

def normalize(X):
    """Softmax over the last axis, then rescale rows along axis 1 to sum to 1.

    Used as a Lambda to turn attention scores (e.g. shape (?, steps, 1))
    into a probability distribution over the `steps` axis.
    """
    # NOTE: removed a stray debug print(X.shape) that fired during graph build.
    result = K.softmax(X)
    temp = K.sum(result, axis=1, keepdims=True)
    temp = K.repeat_elements(temp, rep=X.shape[1], axis=1)
    return result / temp

def reduce_dimension_output_shape(input_shape):
    """Shape inference for reduce_dimension: 3D (batch, steps, 1) -> [batch, steps]."""
    dims = list(input_shape)
    assert len(dims) == 3  # only valid for 3D tensors
    return dims[:2]

def attention(x, dim):
    """Weighted sum: contract weights x[0] against values x[1] along axis 1,
    returning a flat (?, dim) tensor."""
    weights, values = x[0], x[1]
    pooled = K.batch_dot(weights, values, axes=[1, 1])
    return K.reshape(pooled, [-1, dim])


def attention_output_shape(input_shape):
    """Shape inference for attention: take (batch, features) from the 3D value input."""
    value_shape = list(input_shape[1])
    assert len(value_shape) == 3
    return [value_shape[0], value_shape[-1]]


def liter(x, length):
    """Tile a (?, dim) vector `length` times along a new axis -> (?, length, dim)."""
    return K.repeat(x, length)


def liter_output_shape(input_shape):
    """Shape inference for liter: (batch, dim) -> [batch, MAX_LEN, dim]."""
    batch_and_dim = list(input_shape)
    return [batch_and_dim[0], MAX_LEN, batch_and_dim[1]]

def build_model(embedding_matrix, position_embeddings_matrix):
    """Build and compile the bidirectional-GRU attention model.

    Inputs: sentence token ids (MAX_LEN,), position ids (MAX_LEN,) and
    aspect token ids (ASPECT_MAX_LEN,). Output: softmax over CLASS_NUM
    sentiment classes. Compiled with categorical cross-entropy + RMSprop.
    """
    # =========== define the three inputs ==============#
    sentence_input = layers.Input(shape=(MAX_LEN,), dtype='int32', name='sentence_input')
    position_input = layers.Input(shape=(MAX_LEN,), dtype='int32', name='position_input')
    aspect_input = layers.Input(shape=(ASPECT_MAX_LEN,), dtype='int32', name='aspect_input')
    # Pretrained word vectors for sentence/aspect; position embeddings cover
    # offsets in [0, 2*MAX_LEN). mask_zero=True treats id 0 as padding.
    sentence_embedding = layers.Embedding(WORD_NUMS, 300, mask_zero=True, input_length=82, weights = [embedding_matrix])(sentence_input)
    position_embedding = layers.Embedding(MAX_LEN * 2, 50, input_length=MAX_LEN, trainable=True, mask_zero=True, weights = [position_embeddings_matrix])(position_input)
    aspects_embeddings = layers.Embedding(WORD_NUMS, 300, mask_zero=True, input_length=9, weights = [embedding_matrix])(aspect_input)
    print(sentence_embedding.shape)
    # Word + position features feed a bidirectional GRU -> (?, MAX_LEN, 600)
    inputs = layers.concatenate([sentence_embedding, position_embedding])
    GRUCell = layers.GRU(300, activation='sigmoid', return_sequences=True, recurrent_dropout=0.5, dropout=0.5)
    hiddent_states = layers.Bidirectional(GRUCell)(inputs)
    # ========= source2aspect attention ========= #
    aspect_attention = layers.Dense(1, activation='tanh')(aspects_embeddings)
    aspect_attention = layers.Lambda(normalize, name='aspect_attention')(aspect_attention) # normalize scores to a distribution
    aspect_embedding = layers.Lambda(attention,
                              output_shape=attention_output_shape,
                              arguments={'dim': 300})([aspect_attention, aspects_embeddings]) # weighted sum of aspect word vectors

    # Broadcast the pooled aspect vector across all MAX_LEN time steps.
    aspect_embedding = layers.Lambda(liter,
                              output_shape=liter_output_shape,
                              arguments={'length': MAX_LEN})(aspect_embedding)
    x = layers.concatenate([aspect_embedding, hiddent_states])
    x = layers.TimeDistributed(layers.Dense(300, activation='tanh'))(x) # intended as a linear combination of aspect + hidden state
    x = layers.concatenate([x, hiddent_states])

    # Per-timestep attention score, normalized, then used to pool the
    # 600-dim BiGRU states into a single sentence representation.
    x = layers.TimeDistributed(layers.Dense(1, activation='tanh'))(x)  # (?, 78, 1)
    attention_x = layers.Lambda(normalize,
                         output_shape=reduce_dimension_output_shape,
                         name='attention_x')(x)
    x = layers.Lambda(attention, output_shape=attention_output_shape, arguments={'dim': 600})(
        [attention_x, hiddent_states])  # (?, 600)

    x = layers.Dropout(rate=0.5)(x)
    predictions = layers.Dense(CLASS_NUM, activation='softmax')(x)  # final 3-way classification
    model = tf.keras.Model(inputs=[sentence_input, position_input, aspect_input], outputs=predictions)
    model.compile(loss=['categorical_crossentropy'], optimizer='rmsprop', metrics=['accuracy'])
    # print(model.summary())
    return model

def train(sentence_inputs=None, position_inputs=None, aspect_input=None, labels=None, model=None):
    """Run one epoch of training on `model` and return it.

    The three input arrays are fed by the model's named input layers;
    `labels` are the one-hot class targets; `model` is the compiled Keras
    model produced by build_model().
    """
    model.fit({'sentence_input': sentence_inputs,
               'position_input': position_inputs,
               'aspect_input': aspect_input},
              labels, epochs=1, batch_size=64, verbose=2)
    # NOTE(review): the original called model.save_model(), which does not
    # exist on tf.keras.Model and would raise AttributeError. Checkpointing
    # is handled by the caller via model.save_weights(), so the call is removed.
    return model

def predict(sentence_inputs=None, position_inputs=None, aspect_input=None, model=None):
    """Return class-probability predictions for the given inputs.

    Mirrors train(): inputs are routed through the model's named input
    layers. Defaults changed from mutable [] literals to None (the old
    defaults were an anti-pattern and unusable anyway, since model=None
    would fail first).
    """
    results = model.predict({'sentence_input': sentence_inputs,
                             'position_input': position_inputs,
                             'aspect_input': aspect_input},
                            batch_size=64, verbose=0)
    return results

def change(data, aspect_data, word_ids, label):
    """Clean raw sentence/aspect lines, build position inputs, and convert
    4-way one-hot labels into 3-way one-hot labels.

    A sample is discarded when getPositionEmbeddings() yields no positions,
    or when its label maps to class 4 (treated as invalid for the 3-class
    setup). NOTE: `data` and `aspect_data` are cleared in place on return,
    matching the original behavior (callers rebind them afterwards).

    Returns (aspects, sentences, position_inputs, new_labels).
    """
    # Dot with a one-hot 4-way label row gives its 1-based class id (1..4).
    class_ids = np.array([1, 2, 3, 4])
    sentences = []
    aspects = []
    position_inputs = []
    new_labels = []
    for index in range(len(data)):
        aspect_data[index] = aspect_data[index].strip('\n').strip()
        temp_aspect = aspect_data[index]
        sentence = data[index].strip('\n')
        position_input = getPositionEmbeddings(sentence, aspect_data[index], word_ids)
        if position_input == []:
            print('===== discard sentence %d ====' % index)
            continue
        mat_result = np.matmul(class_ids, label[index])
        if (mat_result > 3):
            print('==== aspect disobey class relu, discarded sentence %d ====' % index)
            continue
        new_label = np.zeros((3, ))
        new_label[int(mat_result) - 1] = 1
        new_labels.append(new_label)
        sentences.append(sentence)
        aspects.append(temp_aspect)
        position_inputs.append(position_input)
    data.clear()
    aspect_data.clear()
    # NOTE(review): removed `word_index.clear()` — it referenced a global
    # defined only under __main__ and would raise NameError if this module
    # were imported; clearing it had no effect on the returned values.
    return aspects, sentences, position_inputs, new_labels

def loadTrainData(resource):
    """Load the training split: sentence token-id lines, (?, 4) one-hot
    labels, and aspect token-id lines for the given resource."""
    return loadData(resource), loadLabel(resource), loadAspectToken(resource)

def loadTestData(resource):
    """Load the test split: sentences, labels, and aspect tokens."""
    return (loadData(resource, type = 'test'),
            loadLabel(resource, type = 'test'),
            loadAspectToken(resource, type = 'test'))

def formatInput(data, aspect_data, aspect_max_len=9):
    """Parse whitespace-separated token-id strings into integer id lists.

    Each sentence line becomes a list of ints; each aspect line becomes a
    list of ints right-padded with zeros to `aspect_max_len` (default 9,
    the previous hard-coded value, matching ASPECT_MAX_LEN).

    Returns (sentence_input, aspect_input) as parallel lists.
    """
    sentence_input = []
    aspect_input = []
    for sentence_line, aspect_line in zip(data, aspect_data):
        sentence = [int(tok) for tok in sentence_line.strip('\n').split()]
        aspect = [int(tok) for tok in aspect_line.strip('\n').split()]
        aspect += [0] * (aspect_max_len - len(aspect))  # zero-pad on the right
        sentence_input.append(sentence)
        aspect_input.append(aspect)
    return sentence_input, aspect_input

def ground_truth_label(labels):
    """Flatten one-hot label rows into their integer class indices."""
    return [i for label in labels for i, score in enumerate(label) if score == 1]
if __name__ == '__main__':
    # =========== data loading and parameter initialization =======================#
    resource = 'laptop'
    model_path = 'model_'
    word_index = loadWordIndex('laptop')
    words_ids = loadWordsIds(max_len=82)
    word_embedding_matrix = transfer(laptop_model, word_index)
    init_position_embeddings_matrix = loadInitPositionEmbeddings(max_len=82, vector_dimension=50)
    # ======  load training data ===== #
    data, label, aspect_data = loadTrainData(resource)
    aspects, sentences, position_input, label = change(data, aspect_data, words_ids, label)
    data, aspect_data = formatInput(sentences, aspects)
    print('==== validate training data %d ======' % len(sentences))
    # ===== load test data ===== #
    test_data, test_label, test_aspect_data = loadTestData(resource)
    test_aspects, test_sentences, test_position_input, test_label = change(test_data, test_aspect_data, words_ids, test_label)
    test_data, test_aspect_data = formatInput(test_sentences, test_aspects)
    # test labels become integer class ids for the Evaluate helper
    test_label = ground_truth_label(test_label)
    print('==== validate test data %d =====' % len(test_sentences))
    # # ============== training =========================#
    print('=== embeddings matrix shape', word_embedding_matrix.shape, '======')
    print('=== position embeddings matirx shape', init_position_embeddings_matrix.shape, '=====')
    data = np.array(data)
    aspect_data = np.array(aspect_data)
    position_input = np.array(position_input)
    label = np.array(label)
    test_data = np.array(test_data)
    test_aspect_data = np.array(test_aspect_data)
    test_position_input = np.array(test_position_input)
    # 5 independent runs, each training a fresh model for up to 80 epochs.
    for i in range(5):
        model = build_model(embedding_matrix=word_embedding_matrix, position_embeddings_matrix=init_position_embeddings_matrix)
        evalator = Evaluate(test_label)
        epoch = 1
        while epoch <= 80:
            model = train(sentence_inputs=data, position_inputs=position_input, aspect_input=aspect_data, labels=label,
                          model=model)
            results = predict(test_data, test_position_input, test_aspect_data, model)
            f1, acc = evalator.get_f1_score(results, epoch)
            # checkpoint weights whenever accuracy or F1 clears these
            # thresholds — presumably tuned against a baseline; verify.
            if acc > 0.7535:
                model.save_weights(model_path + "_acc_" + str(acc * 100) + "_F_" + str(f1 * 100) + "_" + str(epoch))
            elif f1 > 0.7080:
                model.save_weights(model_path + "_acc_" + str(acc * 100) + "_F_" + str(f1 * 100) + "_" + str(epoch))
            epoch += 1

    print('finished')