#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2018/8/28 19:46
# @Author  : seeledu
# @email   : seeledu@bug.moe
# @File    : TextCNN.py
# @Software: PyCharm
"""
TextCNN模型用于Intent分类
拼接 3个卷积+池化
每一种参数组合进行 5折交叉验证，使用早期停止法
每一折在验证集上结果最好的模型保存到 result_and_model/intent/版本号/number/best文件夹
每一折最好模型的预测结果保存到 result_and_model/intent/版本号/版本号.csv
"""
# from __future__ import print_function
from keras.layers import Conv1D, MaxPool1D
from keras.layers import Flatten, Concatenate
import os
import numpy as np
from keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.layers import Dense, Input, Dropout
from keras.models import Model
from keras.optimizers import Adam, rmsprop
from keras.utils import np_utils
import keras.backend as K
from keras.preprocessing import sequence
import keras.backend.tensorflow_backend as KTF
import tensorflow as tf

# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Let TensorFlow grow GPU memory on demand instead of reserving it all upfront.
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
session = tf.Session(config=config)
KTF.set_session(session)

# ---- hyper-parameters for the grid search ----
num_fold = 5             # number of folds for cross-validation
sequence_length = 30     # maximum sentence length (tokens)
embedding_dim = 768      # word-vector dimension
filter_sizes = [[3, 4, 5]]           # kernel-size triples, one per conv branch
num_filters_list = [100, 150, 200]   # candidate filter counts
drops = [0.3, 0.5]       # candidate dropout rates
epochs = 5000            # upper bound on epochs (early stopping ends training sooner)
batch_size = 32
num_intent = 24          # number of intent classes

# weight_path = './全交叉验证_HaBERT/全交叉验证_SlotIntent_habert/'
number = 1000            # model id; one id per hyper-parameter combination
data_path = r'../data/npy/'  # dataset directory
version = 'TextCNN_BERT'
model_root_path = '../result_and_model/intent/{}/'.format(version)
if not os.path.exists(model_root_path):
    os.makedirs(model_root_path)
# CSV that collects per-fold results for every hyper-parameter combination.
output_file = open('{}{}.csv'.format(model_root_path, version), "w", encoding='utf-8')
output_file.write("number,fold,filter_size,num_filters,dropout,val_acc,test_acc\n")

print('Loading data......')
# Held-out test set: pad token sequences to the fixed length and one-hot the labels.
test_x = sequence.pad_sequences(np.load(data_path + 'test_x.npy'),
                                maxlen=sequence_length)
test_y = np_utils.to_categorical(np.load(data_path + 'test_intent.npy'),
                                 num_classes=num_intent)

# Grid search over (dropout, kernel-size triple, filter count); each combination
# is evaluated with num_fold-fold cross-validation plus the held-out test set.
for drop in drops:
    for filter_size in filter_sizes:
        for num_filters in num_filters_list:
            avg_val_acc = 0.0   # sum of best dev accuracies over folds
            avg_test_acc = 0.0  # sum of test accuracies over folds
            for fold in range(num_fold):
                save_path = model_root_path + str(number) + '/'
                if not os.path.exists(save_path):
                    os.makedirs(save_path)

                # Per-fold train/dev split (pre-computed .npy files).
                x_train = np.load(data_path + 'train_x' + str(fold) + '.npy')
                x_dev = np.load(data_path + 'dev_x' + str(fold) + '.npy')
                x_train = sequence.pad_sequences(x_train, maxlen=sequence_length)
                x_dev = sequence.pad_sequences(x_dev, maxlen=sequence_length)

                y_train = np.load(data_path + 'train_intent_y' + str(fold) + '.npy')
                y_dev = np.load(data_path + 'dev_intent_y' + str(fold) + '.npy')
                y_train = np_utils.to_categorical(y_train, num_classes=num_intent)
                y_dev = np_utils.to_categorical(y_dev, num_classes=num_intent)

                print("Creating Model...")

                # TextCNN: three parallel Conv1D branches (one kernel size each),
                # each max-pooled over the whole time axis, then concatenated.
                inputs = Input(dtype='float32', shape=(sequence_length, embedding_dim))
                # padding='valid': no zero padding around the convolution
                conv_0 = Conv1D(num_filters, kernel_size=filter_size[0], padding='valid',
                                kernel_initializer='normal', activation='relu')(inputs)
                conv_1 = Conv1D(num_filters, kernel_size=filter_size[1], padding='valid',
                                kernel_initializer='normal', activation='relu')(inputs)
                conv_2 = Conv1D(num_filters, kernel_size=filter_size[2], padding='valid',
                                kernel_initializer='normal', activation='relu')(inputs)

                # Pool size = full output length of each branch -> global max pooling.
                maxpool_0 = MaxPool1D(pool_size=(sequence_length - filter_size[0] + 1), strides=1, padding='valid')(conv_0)
                maxpool_1 = MaxPool1D(pool_size=(sequence_length - filter_size[1] + 1), strides=1, padding='valid')(conv_1)
                maxpool_2 = MaxPool1D(pool_size=(sequence_length - filter_size[2] + 1), strides=1, padding='valid')(conv_2)

                concatenated_tensor = Concatenate(axis=1)([maxpool_0, maxpool_1, maxpool_2])
                flatten = Flatten()(concatenated_tensor)
                dropout = Dropout(drop)(flatten)
                output = Dense(units=num_intent, activation='softmax')(dropout)

                model = Model(inputs=inputs, outputs=output)
                # save_best_only: only checkpoints that improve val_acc are written,
                # so the newest file in save_path is this fold's best model.
                checkpoint = ModelCheckpoint(save_path + str(fold) + 'weights.{epoch:03d}-{val_acc:.4f}.hdf5',
                                             monitor='val_acc', verbose=1, save_best_only=True, mode='auto')
                adam = Adam(lr=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
                model.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])

                print("Traning Model...")
                model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
                          verbose=1, callbacks=[checkpoint,
                                                EarlyStopping(
                                                    monitor='val_acc',
                                                    verbose=2,
                                                    patience=30,
                                                    mode='max')],
                          validation_data=(x_dev, y_dev))  # starts training
                # Pick the most recently written checkpoint file (= best model of
                # this fold, see save_best_only above). Sub-directories such as
                # 'best' from earlier runs are excluded from the candidates.
                candidates = [fn for fn in os.listdir(save_path)
                              if not os.path.isdir(os.path.join(save_path, fn))]
                candidates.sort(key=lambda fn: os.path.getmtime(os.path.join(save_path, fn)))
                best_name = candidates[-1]

                model.load_weights(save_path + best_name)
                if not os.path.exists(save_path + 'best/'):
                    os.makedirs(save_path + 'best/')
                model.save(save_path + 'best/' + best_name)

                dev_result = model.evaluate(x_dev, y_dev)
                test_result = model.evaluate(test_x, test_y)
                avg_val_acc += dev_result[1]
                avg_test_acc += test_result[1]
                output_file.write(str(number) + "," + str(fold) + "," + str(filter_size) + "," + str(num_filters) + ","
                                  + str(drop) + "," + str(dev_result[1]) + "," + str(test_result[1]) + "\n")
                output_file.flush()  # keep per-fold rows even if the run is interrupted

                # Free graph/session memory between folds, then re-install a session
                # with the allow_growth config (clear_session discards the one set
                # by KTF.set_session at startup).
                K.clear_session()
                KTF.set_session(tf.Session(config=config))
                del model

            # Fold-averaged summary for this hyper-parameter combination;
            # divide by num_fold rather than a hard-coded constant.
            output_file.write(
                "Number: " + str(number) + ", Average Validation accuracy: " + str(avg_val_acc / num_fold) +
                ", Average Test accuracy: " + str(avg_test_acc / num_fold) + "\n\n")
            print("Number:", number, "Average Validation accuracy:", str(avg_val_acc / num_fold),
                  "Average Test accuracy:", str(avg_test_acc / num_fold))
            number += 1

output_file.close()
