# Author: Nanyu
# Date: 2022/6/28 9:43
from tensorflow.keras import Input
from tensorflow.keras.layers import Conv1D, MaxPool1D, Dense, Flatten, concatenate, Embedding, LSTM, Dropout
from tensorflow.keras.models import Model
from med_w2vec import build_dict_vector, get_list
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.layers import BatchNormalization  # 规范化
from tensorflow.keras.callbacks import ReduceLROnPlateau
from tensorflow.keras.regularizers import l2
import tensorflow as tf
from tensorflow.keras.layers import Dense, Dropout, Flatten, MaxPool1D, Conv1D
from tensorflow.keras.layers import Embedding, LSTM
from med_constant import Med_constant

# Module-level side effect: load the pre-split train/test/validation data and
# build the pretrained-embedding lookup at import time.
# NOTE(review): presumably X arrays are padded token-id sequences and y arrays
# are one-hot class labels — confirm against med_w2vec.get_list().
train_X, train_y, test_X, test_y, valid_X, valid_y = get_list()
dict_word, dict_word_vector, embeddings_matrix = build_dict_vector()


def CNN_LSTM(max_sequence_length, max_token_num, embedding_dim, output_dim,
             embedding_matrix=embeddings_matrix):
    """Build an (uncompiled) CNN + LSTM text-classification model.

    Args:
        max_sequence_length: length of the padded input token-id sequences.
        max_token_num: vocabulary size (``input_dim`` of the Embedding layer).
        embedding_dim: dimensionality of the word vectors.
        output_dim: number of target classes (softmax units).
        embedding_matrix: pretrained embedding weights; defaults to the
            module-level ``embeddings_matrix`` (bound once at import time).
            Pass ``None`` to train the embedding from scratch.

    Returns:
        A ``tf.keras.Model`` mapping (batch, max_sequence_length) token ids
        to (batch, output_dim) class probabilities.
    """
    # Input layer: one token-id sequence per sample.
    x_input = Input(shape=(max_sequence_length,), name="Input_layer")

    # Embedding layer: initialise from the pretrained matrix when available.
    # Weights stay trainable so the vectors can be fine-tuned on this task.
    if embedding_matrix is None:
        x_emb = Embedding(input_dim=max_token_num, output_dim=embedding_dim,
                          input_length=max_sequence_length)(x_input)
    else:
        x_emb = Embedding(input_dim=max_token_num, output_dim=embedding_dim,
                          input_length=max_sequence_length,
                          weights=[embedding_matrix], trainable=True)(x_input)

    dropout0 = Dropout(0.3)(x_emb)

    # Parallel convolution + max-pooling branches, one per kernel size.
    # pool_size equals the conv output length, so each branch collapses the
    # time axis to 1 (i.e. a global max pool over the sequence).
    pool_output = []
    kernel_sizes = [2, 3]
    for kernel_size in kernel_sizes:
        c = Conv1D(filters=40, kernel_size=kernel_size, strides=1)(dropout0)
        p = MaxPool1D(pool_size=int(c.shape[1]))(c)
        pool_output.append(p)

    # Join branch outputs along the channel axis:
    # (batch, 1, 40 * len(kernel_sizes)). The identity comprehension in the
    # original was redundant — concatenate already takes the list.
    pool_output = concatenate(pool_output)

    # LSTM over the pooled (length-1) sequence.
    lstm_out0 = LSTM(Med_constant.lstm_output_size,
                     return_sequences=True)(pool_output)

    # Classification head: flatten -> dropout -> batch-norm -> L2-regularised
    # softmax.
    x_flatten = Flatten()(lstm_out0)
    drop_out = Dropout(0.5)(x_flatten)
    batch_layer = BatchNormalization()(drop_out)
    y = Dense(output_dim, activation='softmax',
              kernel_regularizer=l2(0.005))(batch_layer)

    # Use keyword args consistently (original mixed a positional list with a
    # keyword argument).
    model = Model(inputs=x_input, outputs=y)
    model.summary()
    return model


def train():
    """Train the CNN-LSTM classifier and save it to ``../mymodel/med.h5``.

    Uses the module-level train/validation splits and pretrained embedding
    matrix loaded at import time.

    Returns:
        The trained ``tf.keras.Model``.
    """
    # Hyper-parameters pulled from the shared constants module.
    # (Removed the unused local ``regularizers_lambda`` from the original.)
    feature_size = Med_constant.max_len
    embed_size = Med_constant.vector_size
    num_classes = len(Med_constant.category_dict)
    learning_rate = 0.001
    batch_size = Med_constant.batch_size
    epochs = Med_constant.epoch

    model = CNN_LSTM(max_sequence_length=feature_size,
                     max_token_num=len(embeddings_matrix),
                     embedding_dim=embed_size,
                     output_dim=num_classes)

    model.compile(tf.optimizers.Adam(learning_rate=learning_rate),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Stop early once validation loss has stalled for 3 epochs.
    early_stopping = EarlyStopping(monitor='val_loss', patience=3, mode='auto')
    # Shrink the learning rate 10x after 2 stagnant epochs.
    rl = ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=2,
                           verbose=1, mode='auto', cooldown=0, min_lr=0)

    # The History object returned by fit() was never used, so it is not bound.
    model.fit(train_X, train_y,
              batch_size=batch_size,
              epochs=epochs,
              workers=2,
              use_multiprocessing=True,
              callbacks=[early_stopping, rl],
              validation_data=(valid_X, valid_y), verbose=1)

    model.save("../mymodel/med.h5")
    return model


if __name__ == '__main__':
    # Script entry point: train the model and save it to disk.
    train()
