# -*- coding: utf-8 -*-
# @Time    : 2019/3/28 23:21
# @Author  : seeledu
# @email   : seeledu@bug.moe
# @File    : BLSTM_bert_att3.py
# @Software: PyCharm
"""
测试naacl2016中的att机制
 Follows the work of Yang et al. [https://www.cs.cmu.edu/~diyiy/docs/naacl16.pdf]
    "Hierarchical Attention Networks for Document Classification"
    by using a context vector to assist the attention
"""

from __future__ import print_function

# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import os
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Input, Bidirectional, Dropout, LSTM
from keras.models import Model
from keras.optimizers import Adam, rmsprop
from keras.utils import np_utils
import keras.backend as K
from keras.preprocessing import sequence
from AttentionLayer import AttentionLayer
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf

# Let GPU memory grow on demand instead of pre-allocating the whole card.
# NOTE: tf.ConfigProto / tf.Session are TensorFlow 1.x APIs.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
KTF.set_session(session)

# ---- Hyper-parameter grid and output locations ----
num_fold = 5  # number of cross-validation folds
sequence_length = 30  # maximum sentence length (time steps per sample)
embedding_dim = 768  # embedding size: AI-lab 200, 10G-dict 400, BERT 768
epochs = 5000  # upper bound on epochs; early stopping ends training much sooner
batch_sizes = [32]  # examples per gradient step
lstm_cells = [64, 128]  # BiLSTM hidden-unit sizes to try
dense_cells = [128, 256]  # dense hidden-layer sizes to try
drops = [0.3, 0.5]  # dropout rates to try
learning_rate = [1e-3]
num_intent = 24  # number of intent classes
# weight_path = './全交叉验证_HaBERT/全交叉验证_SlotIntent_habert/'
data_path = r'../data/npy/'
version = 'BLSTM_att3_BERT'
model_root_path = r'../result_and_model/intent/' + version + '/'
if not os.path.exists(model_root_path):
    os.makedirs(model_root_path)
# NOTE(review): opened without a context manager and only closed at the very
# end of the script, so an exception mid-run leaves the CSV unflushed.
output_file = open(model_root_path + version + ".csv", "w", encoding='utf-8')
output_file.write("number,fold,lstm_cell,dense_cell,dropout,val_acc,test_acc\n")
# output_file.write("number\tbatch_sizes\tlstm_cell\tdense_cell\tscout\n")
number = 10000  # run id recorded in the CSV; incremented per hyper-param combo

print('Loading data......')
# Test-set embeddings: pad/truncate each sentence to sequence_length.
# dtype='float32' preserves the float embedding values — pad_sequences
# defaults to int32, which would truncate them.
test_x = np.load(data_path + 'test_x.npy')
test_x = sequence.pad_sequences(test_x, maxlen=sequence_length, dtype='float32')
test_y = np.load(data_path + 'test_intent.npy')
test_y = np_utils.to_categorical(test_y, num_intent)

# Grid search over dropout rate / batch size / LSTM width / dense width,
# each combination evaluated with num_fold-fold cross-validation.
# One CSV row is written per fold, plus a summary line per combination.
for drop in drops:
    for batch_size in batch_sizes:
        for lstm_cell in lstm_cells:
            for dense_cell in dense_cells:
                # NOTE(review): dense_cell is logged in the CSV but the dense
                # hidden layer was commented out of the model, so it never
                # affects training — confirm whether it should be re-enabled.
                avg_val_acc = 0
                avg_test_acc = 0
                for fold in range(num_fold):
                    save_path = model_root_path + str(number) + '/'
                    if not os.path.exists(save_path):
                        os.makedirs(save_path)

                    # Load this fold's embeddings. dtype='float32' matches the
                    # test-set padding above: the pad_sequences default (int32)
                    # would truncate the float BERT embeddings to integers and
                    # silently destroy the inputs.
                    x_train = np.load(data_path + 'train_x' + str(fold) + '.npy')
                    x_dev = np.load(data_path + 'dev_x' + str(fold) + '.npy')
                    x_train = sequence.pad_sequences(x_train, maxlen=sequence_length, dtype='float32')
                    x_dev = sequence.pad_sequences(x_dev, maxlen=sequence_length, dtype='float32')

                    y_train = np.load(data_path + 'train_intent_y' + str(fold) + '.npy')
                    y_dev = np.load(data_path + 'dev_intent_y' + str(fold) + '.npy')
                    y_train = np_utils.to_categorical(y_train, num_intent)
                    y_dev = np_utils.to_categorical(y_dev, num_intent)

                    print("Creating Model......")
                    # BiLSTM over pre-computed BERT token embeddings, followed
                    # by the Yang et al. context-vector attention, then a
                    # softmax over the intent classes.
                    inputs = Input(dtype='float32', shape=(sequence_length, embedding_dim))
                    lstm_1 = Bidirectional(LSTM(lstm_cell, dropout=drop, return_sequences=True))(inputs)
                    att_layer, alpha = AttentionLayer(name='AttentionLayer')(lstm_1)
                    output = Dense(units=num_intent, activation='softmax')(att_layer)

                    model = Model(inputs=inputs, outputs=output)
                    # save_best_only=True: only checkpoints that improve
                    # val_acc are written, so the newest file is the best one.
                    checkpoint = ModelCheckpoint(save_path + str(fold) + 'weights.{epoch:03d}-{val_acc:.4f}.hdf5',
                                                 monitor='val_acc', verbose=1, save_best_only=True, mode='auto')
                    adam = Adam(lr=learning_rate[0], beta_1=0.9, beta_2=0.999, epsilon=1e-08)
                    model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

                    print("Traning Model......")
                    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                              verbose=1,
                              callbacks=[checkpoint,
                                         EarlyStopping(monitor='val_acc',
                                                       verbose=1,
                                                       patience=20,
                                                       mode='max')],
                              validation_data=(x_dev, y_dev))

                    # Pick the most recently written checkpoint file (i.e. the
                    # best model for this fold, per save_best_only above).
                    candidates = [fn for fn in os.listdir(save_path)
                                  if not os.path.isdir(os.path.join(save_path, fn))]
                    candidates.sort(key=lambda fn: os.path.getmtime(os.path.join(save_path, fn)))
                    best_weights = candidates[-1]
                    model.load_weights(save_path + best_weights)

                    # Keep a copy of the best checkpoint under <run>/best/.
                    best_dir = save_path + '/best/'
                    if not os.path.exists(best_dir):
                        os.makedirs(best_dir)
                    model.save(best_dir + best_weights)

                    dev_result = model.evaluate(x_dev, y_dev)
                    test_result = model.evaluate(test_x, test_y)
                    avg_val_acc += dev_result[1]
                    avg_test_acc += test_result[1]
                    output_file.write(str(number) + "," + str(fold) + "," + str(lstm_cell) + "," + str(dense_cell) + ","
                                      + str(drop) + "," + str(dev_result[1]) + "," + str(test_result[1]) + "\n")

                    # Release the TF graph/session memory before the next fold.
                    K.clear_session()
                    del model

                # Average over num_fold folds (was hard-coded "/ 5", which
                # silently breaks if num_fold is changed).
                output_file.write("Number:" + str(number) + "Average Validation accuracy:" + str(
                    avg_val_acc / num_fold) + "Average Test accuracy:" + str(avg_test_acc / num_fold) + "\n\n")
                print("Number:", number, "Average Validation accuracy:", str(avg_val_acc / num_fold),
                      "Average Test accuracy:", str(avg_test_acc / num_fold))
                number += 1

output_file.close()
