# -*- coding: utf-8 -*-
# @Time    : 2019/3/28 23:21
# @Author  : seeledu
# @email   : seeledu@bug.moe
# @File    : BLSTM_bert_att2.py
# @Software: PyCharm
"""
Test of the attention mechanism from NAACL 2016.

Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf],
"Hierarchical Attention Networks for Document Classification",
which uses a context vector to assist the attention.
"""
import os
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Input, Bidirectional, LSTM, Dropout
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf
from keras.models import Model
from keras.optimizers import Adam, rmsprop
from keras.utils import np_utils
from attention_keras import Attention, Position_Embedding
import keras.backend as K
from keras.preprocessing import sequence
# from __future__ import print_function
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Adaptive GPU memory: let TensorFlow grow its allocation on demand
# instead of grabbing all device memory up front.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
KTF.set_session(session)

# ---- Hyperparameter grid and experiment constants ----
num_fold = 5
sequence_length = 30    # maximum sentence length (tokens)
embedding_dim = 768     # embedding size: AI-lab 200, 10G-dict 400, BERT 768
epochs = 5000           # upper bound; early stopping ends training sooner
batch_sizes = [16]      # examples per gradient step
lstm_cells = [64, 128]
dense_cells = [128, 256]  # hidden-layer unit counts
drops = [0.5, 0.3]        # dropout rates
learning_rate = [1e-3]
num_intent = 24           # number of intent classes

# ---- Output locations ----
data_path = r'../data/npy/'
version = 'BLSTM_att2_BERT'
model_root_path = '../result_and_model/intent/{}/'.format(version)
os.makedirs(model_root_path, exist_ok=True)
output_file = open('{}{}.csv'.format(model_root_path, version), 'w', encoding='utf-8')
output_file.write("number,fold,lstm_cell,dense_cell,dropout,val_acc,test_acc\n")
number = 10000

print('Loading data......')
# Held-out test set: pre-computed BERT embeddings, padded/truncated to a
# fixed length; labels one-hot encoded over the intent classes.
test_x = sequence.pad_sequences(np.load(data_path + 'test_x.npy'),
                                maxlen=sequence_length, dtype='float32')
test_y = np_utils.to_categorical(np.load(data_path + 'test_y.npy'), num_intent)

# Grid search over (dropout, batch size, LSTM width, dense width), with
# num_fold-way cross-validation per configuration. Each fold trains a
# BiLSTM intent classifier, keeps its best checkpoint by val_acc, and
# logs per-fold dev/test accuracy plus per-configuration averages.
for drop in drops:
    for batch_size in batch_sizes:
        for lstm_cell in lstm_cells:
            for dense_cell in dense_cells:
                avg_val_acc = 0
                avg_test_acc = 0
                for fold in range(num_fold):
                    save_path = model_root_path + str(number) + '/'
                    if not os.path.exists(save_path):
                        os.makedirs(save_path)

                    # Per-fold train/dev splits of BERT sentence embeddings.
                    x_train = np.load(data_path + 'train_x' + str(fold) + '.npy')
                    x_dev = np.load(data_path + 'dev_x' + str(fold) + '.npy')
                    # BUG FIX: pad_sequences defaults to dtype='int32', which
                    # truncated the float BERT embeddings to integers. Pad as
                    # float32, consistent with test_x above.
                    x_train = sequence.pad_sequences(x_train, maxlen=sequence_length, dtype='float32')
                    x_dev = sequence.pad_sequences(x_dev, maxlen=sequence_length, dtype='float32')

                    y_train = np.load(data_path + 'train_intent_y' + str(fold) + '.npy')
                    y_dev = np.load(data_path + 'dev_intent_y' + str(fold) + '.npy')
                    y_train = np_utils.to_categorical(y_train, num_intent)
                    y_dev = np_utils.to_categorical(y_dev, num_intent)

                    print("Creating Model......")
                    # BiLSTM over the pre-computed embeddings, softmax over
                    # intent classes.
                    # NOTE(review): the original also built
                    # Attention(8, 20)([inputs, inputs, inputs]) here but never
                    # connected it to the model output, so it was dead graph
                    # construction and has been removed.
                    inputs = Input(dtype='float32', shape=(sequence_length, embedding_dim))
                    lstm_1 = Bidirectional(LSTM(lstm_cell, dropout=drop))(inputs)
                    output = Dense(units=num_intent, activation='softmax')(lstm_1)

                    model = Model(inputs=inputs, outputs=output)
                    # save_best_only keeps only checkpoints that improve val_acc.
                    checkpoint = ModelCheckpoint(save_path + str(fold) + 'weights.{epoch:03d}-{val_acc:.4f}.hdf5',
                                                 monitor='val_acc', verbose=1, save_best_only=True, mode='auto')
                    adam = Adam(lr=learning_rate[0], beta_1=0.9, beta_2=0.999, epsilon=1e-08)
                    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

                    print("Training Model......")
                    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                              verbose=2, callbacks=[checkpoint,
                                                    EarlyStopping(
                                                        monitor='val_acc',
                                                        verbose=2,
                                                        patience=30,
                                                        mode='max')],
                              validation_data=(x_dev, y_dev))

                    # The newest file in save_path is the best checkpoint
                    # (save_best_only writes only on improvement); reload it
                    # and copy it into a 'best' subdirectory.
                    ckpts = os.listdir(path=save_path)
                    ckpts.sort(key=lambda fn: os.path.getmtime(save_path + "/" + fn)
                               if not os.path.isdir(save_path + "/" + fn) else 0)
                    model.load_weights(save_path + ckpts[-1])
                    if not os.path.exists(save_path + '/best/'):
                        os.makedirs(save_path + '/best/')
                    model.save(save_path + '/best/' + ckpts[-1])

                    dev_result = model.evaluate(x_dev, y_dev)
                    test_result = model.evaluate(test_x, test_y)
                    avg_val_acc += dev_result[1]
                    avg_test_acc += test_result[1]
                    output_file.write(str(number) + "," + str(fold) + "," + str(lstm_cell) + "," + str(dense_cell) + ","
                                      + str(drop) + "," + str(dev_result[1]) + "," + str(test_result[1]) + "\n")
                    # Flush so per-fold rows survive a crash in a later fold.
                    output_file.flush()

                    # Drop the TF graph between folds to avoid memory growth.
                    K.clear_session()
                    del model

                # FIX: average over num_fold instead of the hard-coded 5.
                output_file.write("Number:" + str(number) + "Average Validation accuracy:" + str(
                    avg_val_acc / num_fold) + "Average Test accuracy:" + str(avg_test_acc / num_fold) + "\n\n")
                print("Number:", number, "Average Validation accuracy:", str(avg_val_acc / num_fold),
                      "Average Test accuracy:", str(avg_test_acc / num_fold))
                number += 1

output_file.close()
