from sklearn.model_selection import train_test_split
from tensorflow import keras
from keras.layers.merge import concatenate
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers.embeddings import Embedding
from keras.layers import Conv1D, MaxPooling1D, Flatten, Dropout, Dense, Input, Lambda, BatchNormalization
from keras.models import Model
import numpy as np

from sklearn import metrics


def run_assess(model_name, predict_test, y_test):
    """Print accuracy, weighted precision, weighted recall and weighted F1.

    NOTE(review): sklearn metrics expect (y_true, y_pred). The in-file
    caller passes the ground-truth labels as ``predict_test`` and the
    predictions as ``y_test``, so the calls below end up in the correct
    (y_true, y_pred) order despite the parameter names — confirm before
    reusing this helper elsewhere.
    """
    # (label suffix, score) pairs; labels are printed verbatim after the model name.
    reports = (
        ("文本分类的准确率为：", metrics.accuracy_score(predict_test, y_test)),
        ("文本分类的精度率为：", metrics.precision_score(predict_test, y_test, average='weighted')),
        ("文本分类的召回率为：", metrics.recall_score(predict_test, y_test, average='weighted')),
        ("文本分类的F1值为：", metrics.f1_score(predict_test, y_test, average='weighted')),
    )
    for suffix, score in reports:
        print(model_name + suffix, score)


# Build the TextCNN model.
# Structure: embedding -> (conv + max-pool) x3 -> concat -> dense -> dropout -> dense
# def TextCNN_model_1(x_train_padded_seqs, y_train, x_test_padded_seqs, y_test):

def TextCNN_Model(X, y, num_classes,
                  model_path='../../docs/text_classification/imdb_result/cnn_model.h5',
                  max_features=6000, maxlen=130, embed_dim=300,
                  batch_size=800, epochs=10):
    """Train and evaluate a TextCNN text classifier, then save it.

    Structure: word embedding -> three parallel Conv1D+MaxPooling1D branches
    (kernel sizes 3/4/5) -> concatenate -> flatten -> dropout -> softmax.

    Parameters
    ----------
    X : iterable of str
        Raw text samples (e.g. imdb_data['review']).
    y : array-like of int
        Integer class labels aligned with X.
    num_classes : int
        Number of target classes; also the width of the softmax output.
    model_path : str
        Where the trained model is saved (HDF5).
    max_features : int
        Keep only the most frequent words; Tokenizer's num_words retains
        the top num_words-1 tokens and drops the rest.
    maxlen : int
        Fixed sequence length; shorter samples are zero-padded.
    embed_dim : int
        Embedding vector size per word.
    batch_size, epochs : int
        Training hyperparameters (defaults match the original script).
    """
    # Fit the tokenizer on the corpus and encode texts as word-index sequences.
    tok = Tokenizer(num_words=max_features)
    tok.fit_on_texts(X)
    vocab = tok.word_index
    list_tok = tok.texts_to_sequences(X)

    # Cap every sample at maxlen tokens, zero-padding the shorter ones.
    x = pad_sequences(list_tok, maxlen=maxlen)

    # Split into train/test sets (fixed seed for reproducibility).
    x_train_padded_seqs, x_test_padded_seqs, y_train, y_test = train_test_split(
        x, y, random_state=41)

    main_input = Input(shape=(maxlen,), dtype='float64')
    # BUG FIX: the original set trainable=False while supplying no pretrained
    # weights, freezing a randomly initialized embedding that could never
    # learn. Train the embedding since no pretrained vectors are loaded.
    embedder = Embedding(len(vocab) + 1, embed_dim, input_length=maxlen,
                         trainable=True)
    embed = embedder(main_input)

    # Three parallel branches with word-window sizes 3, 4 and 5. With
    # padding='same' each conv output has length maxlen; the pool size
    # maxlen - kernel + 1 reproduces the original 128/127/126 values at
    # maxlen=130 and collapses each branch to a single timestep.
    branches = []
    for kernel in (3, 4, 5):
        c = Conv1D(256, kernel, padding='same', strides=1, activation='relu')(embed)
        c = MaxPooling1D(pool_size=maxlen - kernel + 1)(c)
        branches.append(c)

    # Merge the three branch outputs along the feature axis.
    cnn = concatenate(branches, axis=-1)
    flat = Flatten()(cnn)
    drop = Dropout(0.2)(flat)
    # BUG FIX: the output width was hard-coded to 2 even though num_classes
    # is a parameter; any num_classes != 2 crashed on the one-hot labels.
    main_output = Dense(num_classes, activation='softmax')(drop)
    model = Model(inputs=main_input, outputs=main_output)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])

    # One-hot encode the training labels to match the softmax output.
    one_hot_labels = keras.utils.to_categorical(y_train, num_classes=num_classes)
    model.fit(x_train_padded_seqs, one_hot_labels, batch_size=batch_size,
              epochs=epochs)

    result = model.predict(x_test_padded_seqs)  # per-class probabilities
    result_labels = np.argmax(result, axis=1)   # most probable class per sample

    # run_assess treats its first metric argument as y_true, so pass the
    # ground-truth labels first and the predictions second.
    run_assess("TextCNN模型", y_test, result_labels)
    model.save(model_path)
