# -*- coding: utf-8 -*-
# @Time    : 2019/1/6 9:01
# @Author  : seeledu
# @email   : seeledu@bug.moe
# @File    : BLSTM.py
# @Software: PyCharm
"""
双向LSTM + Attention + 两个全连接层
使用Word2Vec词向量
本代码在比赛中没有使用，BLSTM_att1 2 3在本代码基础上改的
"""
import os
import joblib
import numpy as np
from attention import AttentionWithContext
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Input, Bidirectional, Embedding, Dropout, LSTM
from keras.models import Model
from keras.optimizers import Adam, rmsprop
from keras.utils import np_utils
from sklearn.model_selection import StratifiedKFold
import data_helpers
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Let TensorFlow grow GPU memory on demand instead of pre-allocating it all.
tf_config = tf.ConfigProto()
tf_config.gpu_options.allow_growth = True
tf_session = tf.Session(config=tf_config)
KTF.set_session(tf_session)


def pretrained_embedding_layer(word_to_vec_map, word_to_index):
    """
    Create a frozen Keras Embedding() layer initialized with pre-trained
    word vectors (e.g. the 400-d 10G-dict or 200-d AI-Lab Word2Vec vectors).

    Arguments:
    word_to_vec_map -- mapping from word to its pre-trained vector (1-D numpy array)
    word_to_index -- mapping from word to its integer index in the vocabulary

    Returns:
    embedding_layer -- pretrained, non-trainable Keras Embedding instance
    """
    # +1 because index 0 is reserved by Keras Embedding (padding/unknown).
    vocab_len = len(word_to_index) + 1
    # Infer the embedding dimension from an arbitrary vector instead of the
    # previously hard-coded key "啊", which broke for vocabularies lacking it.
    # NOTE(review): assumes word_to_vec_map is a dict-like with .values() —
    # true for the joblib-loaded pickle used below.
    emb_dim = next(iter(word_to_vec_map.values())).shape[0]

    # Rows default to the zero vector; only known words get real embeddings.
    emb_matrix = np.zeros((vocab_len, emb_dim))
    for word, index in word_to_index.items():
        try:
            emb_matrix[index, :] = word_to_vec_map[word]
        except KeyError:
            pass  # out-of-vocabulary word: row stays all-zero

    # Non-trainable layer: the pre-trained vectors are kept frozen.
    embedding_layer = Embedding(vocab_len, emb_dim, trainable=False)
    # build() is required before set_weights(); input length stays unspecified.
    embedding_layer.build((None,))
    embedding_layer.set_weights([emb_matrix])

    return embedding_layer


print('Loading data......')
x, y, slots, vocabulary, vocabulary_inv = data_helpers.load_data()
# Fixed split: the last 4528 examples are the test set, the rest is train.
train_x = x[:-4528]
train_y = y[:-4528]
train_slot = slots[:-4528]
test_x = x[-4528:]
test_y = y[-4528:]
test_slot = slots[-4528:]
# One-hot encode test labels up front; train/dev labels are encoded per fold.
test_y = np_utils.to_categorical(test_y, 31)
# x_train, x_dev, y_train, y_dev = train_test_split(x, y, test_size=0.2, random_state=42)

# Hyper-parameters overview
# word_to_vec_map = joblib.load('data/pre-trained word vector/mobile_data&test.pkl')  # pre-trained embedding vector
# Word vectors used in the final stage: data/pre-trained word vector/mobile_data&test&testD.pkl'
# TODO: try to use bert
# word_to_vec_map = joblib.load('H:/research/data/word_embedding/10G_dict.pkl')  # pre-trained embedding vector
word_to_vec_map = joblib.load('/media/seele/机械硬盘/model_weight/10G_dict.pkl')  # pre-trained word -> vector dict
# word_to_vec_map = KeyedVectors.load_word2vec_format('/media/seele/机械硬盘/model_weight/Tencent_AILab_ChineseEmbedding/'
#                               'Tencent_AILab_ChineseEmbedding.txt', binary=False)

# word_to_vec_map = joblib.load('D:/vincy/10G_dict.pkl')  # pre-trained embedding vector
sequence_length = 30  # maximum sentence length (tokens)
maxlen = 64  # NOTE(review): appears unused in this script
vocabulary_size = len(vocabulary)  # vocabulary size
embedding_dim = 400  # word-vector dimension: AI-Lab = 200, 10G-dict = 400
epochs = 100  # maximum number of training epochs (early stopping may end sooner)
embedding_layer = pretrained_embedding_layer(word_to_vec_map, vocabulary)
del word_to_vec_map  # free the large embedding dict once the layer is built
kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=7)

# Grid-search space for the training loop below.
batch_sizes = [32]  # examples per gradient step
lstm_cells = [256, 512]
dense_cells = [256, 512]
drops = [0.3, 0.5]  # dropout rates
learning_rate = [1e-3]
num_intent = 24  # number of intent classes; NOTE(review): unused — the model outputs 31 units

# TSV log of per-fold results; one row per trained model.
output_file = open("./result_and_model/10GdictBLSTM_att2.csv", "w", encoding='utf8')
output_file.write("number\tbatch_sizes\tlstm_cell\tdense_cell\tscout\n")
number = 1000  # run id, also used as the checkpoint sub-directory name

# Grid search over (dropout, batch size, LSTM units, dense units); each
# configuration is trained with 10-fold stratified CV and soft-voted on the
# fixed test set.
for drop in drops:
    for batch_size in batch_sizes:
        for lstm_cell in lstm_cells:
            for dense_cell in dense_cells:
                avg_val_acc = 0
                mul = []  # running sum of test-set softmax outputs across folds (ensemble)
                for train_index, dev_index in kf.split(train_x, train_y):
                    # Checkpoints for every fold of this configuration go here.
                    path = '/media/seele/机械硬盘/seeledu/10GdictBLSTM_att/BLSTN/10FoldVal/' + str(number) + '/'
                    if not os.path.exists(path):
                        os.makedirs(path)
                    x_train, x_dev = train_x[train_index], train_x[dev_index]
                    y_train, y_dev = train_y[train_index], train_y[dev_index]

                    y_train = np_utils.to_categorical(y_train, 31)  # 31 label classes in total
                    y_dev = np_utils.to_categorical(y_dev, 31)

                    print("Creating Model...")
                    # Architecture: embedding -> BLSTM -> attention -> dense -> dropout -> softmax
                    inputs = Input(dtype='float32', shape=(sequence_length,))
                    embedding = embedding_layer(inputs)
                    lstm_1 = Bidirectional(LSTM(lstm_cell, return_sequences=True))(embedding)
                    att_layer = AttentionWithContext()(lstm_1)
                    dense_1 = Dense(units=dense_cell, activation='relu')(att_layer)
                    drop3 = Dropout(drop)(dense_1)
                    # BUG FIX: the softmax previously consumed att_layer directly,
                    # which made dense_1/drop3 dead code and the dense_cell grid
                    # dimension a no-op. Wire it through the dense+dropout stack.
                    output = Dense(units=31, activation='softmax')(drop3)

                    model = Model(inputs=inputs, outputs=output)
                    # Keep only the best weights (by validation accuracy) per fold.
                    checkpoint = ModelCheckpoint(path + 'weights.{epoch:03d}-{val_acc:.4f}.hdf5',
                                                 monitor='val_acc', verbose=1,
                                                 save_best_only=True, mode='auto')
                    adam = Adam(lr=learning_rate[0], beta_1=0.9, beta_2=0.999, epsilon=1e-08)
                    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

                    print("Traning Model...")
                    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                              verbose=2,
                              callbacks=[checkpoint,
                                         EarlyStopping(monitor='val_acc',
                                                       verbose=2,
                                                       patience=30,
                                                       mode='max')],
                              validation_data=(x_dev, y_dev))

                    # Reload the best checkpoint: with save_best_only=True the most
                    # recently written file is the best one, so sort by mtime.
                    ckpts = os.listdir(path=path)
                    ckpts.sort(key=lambda fn: os.path.getmtime(path + "/" + fn)
                               if not os.path.isdir(path + "/" + fn) else 0)
                    model.load_weights(path + ckpts[-1])

                    # Accumulate test predictions for the soft-voting ensemble.
                    if len(mul) == 0:
                        mul = model.predict(test_x)
                    else:
                        mul += model.predict(test_x)

                    # Evaluate once and reuse (previously evaluated twice per fold).
                    test_acc = model.evaluate(test_x, test_y)[1]
                    output_file.write(str(number) + "\t" + str(batch_size) + "\t" + str(lstm_cell)
                                      + "\t" + str(dense_cell) + "\t" + str(test_acc) + "\n")
                    avg_val_acc += model.evaluate(x_dev, y_dev)[1]
                    del model  # release GPU memory before the next fold
                number += 1

                # Ensemble accuracy: argmax of the summed softmax vs one-hot labels.
                right = 0
                for k, tmp in enumerate(mul):
                    if tmp.argmax() == test_y[k].argmax():
                        right += 1
                avg_val_acc /= kf.get_n_splits()  # mean dev accuracy over the folds
                output_file.write(str(avg_val_acc) + '\t' + str(1.0 * right / mul.shape[0]) + '\n')
                print(str(avg_val_acc) + '\t' + str(1.0 * right / mul.shape[0]))
output_file.close()
