import pandas as pd
import utils
from sklearn.feature_extraction.text import TfidfVectorizer
import jieba
import keras
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers.embeddings import Embedding
from keras.layers import Conv1D, MaxPooling1D, Flatten, Dropout, Dense, Input
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn import metrics
import numpy as np
from keras.layers import BatchNormalization
from keras.utils.np_utils import to_categorical
from keras import Model
from keras.layers import concatenate

## CNN-based text classification for THUCNews articles.
data = utils.load_thucnews()
THUCNews_data = pd.DataFrame(data, columns=["words", "sentiment"])

THUCNews_copy = THUCNews_data.copy()
# Category-name -> integer label lookup table.
# BUG FIX: '科技' previously mapped to 8, colliding with '游戏'; the original
# 1..8 sequence shows 9 was intended, so '科技' now gets its own label.
# NOTE(review): THUCNews's home category is usually written '家居', not '家具' —
# confirm this key against the actual data; an unmatched name falls through to 10.
_CATEGORY_LABELS = {
    '体育': 1,
    '娱乐': 2,
    '家具': 3,
    '房产': 4,
    '教育': 5,
    '时尚': 6,
    '时政': 7,
    '游戏': 8,
    '科技': 9,
}
# Vectorized mapping instead of a per-row Python loop with chained
# item assignment (df['col'][i] = v), which raises SettingWithCopyWarning
# and may silently write to a copy. Unknown categories default to 10.
THUCNews_copy['sentiment'] = (
    THUCNews_copy['sentiment'].map(_CATEGORY_LABELS).fillna(10).astype('int')
)

# TF-IDF features: weight each term by its (inverse-document) frequency
# over the training corpus.
# BUG FIX: the token pattern must be a raw string — in a plain string
# '\[' and '\w' are invalid escape sequences (DeprecationWarning today,
# a SyntaxError in future Python versions).
THUC_vectorizer = TfidfVectorizer(token_pattern=r'\[?\w+\]?',
                                  stop_words=utils.chinese_stopwords)
THUCNews_words = THUC_vectorizer.fit_transform(THUCNews_copy["words"])
THUCNews_sentiment = THUCNews_copy["sentiment"]

# Default 75/25 train/test split of the sparse TF-IDF matrix and labels.
THUCNews_train_words, THUCNews_test_words, THUCNews_train_sentiment, THUCNews_test_sentiment = \
    train_test_split(THUCNews_words, THUCNews_sentiment)
# Data preprocessing: segment, integer-encode and pad the texts.
def THUCNewsdata_process(max_len=200):
    """Segment, integer-encode and pad the THUCNews texts.

    max_len: fixed sequence length; longer sequences are truncated and
    shorter ones are zero-padded at the front (pad_sequences default).
    Returns (x_train_padded, y_train, x_test_padded, y_test, vocab),
    where vocab is the tokenizer's word -> index mapping.
    """
    dataset = THUCNews_copy.astype('str')
    # Segment every sentence into a list of words with jieba.
    dataset['words'] = dataset['words'].apply(lambda text: list(jieba.cut(text)))
    # Assign each word a positive integer id (more frequent -> smaller id).
    # NOTE(review): the tokenizer is fit on the full dataset before the
    # train/test split below — confirm this mild leakage is acceptable.
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(dataset['words'])
    vocab = tokenizer.word_index
    # Hold out 10% of the samples for evaluation.
    x_train, x_test, y_train, y_test = train_test_split(
        dataset['words'], dataset['sentiment'], test_size=0.1)
    # Replace each word with its integer id.
    train_ids = tokenizer.texts_to_sequences(x_train)
    test_ids = tokenizer.texts_to_sequences(x_test)
    # Force every sequence to exactly max_len tokens (default 200).
    x_train_padded = pad_sequences(train_ids, maxlen=max_len)
    x_test_padded = pad_sequences(test_ids, maxlen=max_len)
    return x_train_padded, y_train, x_test_padded, y_test, vocab

# CNN classification model (LeNet-5 style).
# Structure: embedding - (conv + pool) x2 - conv - flatten - dropout - BN - dense - dropout - dense.
def CNN_model(x_train_padded_seqs, y_train, x_test_padded_seqs, y_test, vocab):
    """Train and evaluate a LeNet-5-style 1-D CNN text classifier.

    Args:
        x_train_padded_seqs / x_test_padded_seqs: integer sequences padded
            to length 200 (from THUCNewsdata_process).
        y_train / y_test: integer class labels 1..10 (y_test arrives as
            strings because the dataframe was cast to str upstream).
        vocab: word -> index mapping used to size the embedding table.

    Prints accuracy, weighted F1 and weighted recall on the test split.
    """
    model = Sequential()
    # Embed each word id into a 300-d vector; inputs are 200 tokens long.
    model.add(Embedding(len(vocab) + 1, 300, input_length=200))
    model.add(Conv1D(256, 5, padding='same'))
    model.add(MaxPooling1D(11, 11, padding='same'))  # pool_size=11, strides=11
    model.add(Conv1D(128, 5, padding='same'))
    model.add(MaxPooling1D(11, 11, padding='same'))
    model.add(Conv1D(64, 11, padding='same'))
    model.add(Flatten())
    model.add(Dropout(0.1))
    model.add(BatchNormalization())  # batch normalization layer
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.1))
    # 11 outputs: labels run 1..10 (index 0 is unused by the label scheme).
    model.add(Dense(11, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    # CONSISTENCY FIX: use the to_categorical imported at the top of the
    # file; the keras.utils.np_utils path is removed in newer Keras releases.
    one_hot_labels = to_categorical(y_train, num_classes=11)
    model.fit(x_train_padded_seqs, one_hot_labels, epochs=5, batch_size=800)
    # predict_classes() was removed from Keras; take the argmax of the
    # predicted class probabilities instead.
    predict_x = model.predict(x_test_padded_seqs)
    y_predict = np.argmax(predict_x, axis=1)
    # y_test holds string labels, so stringify predictions before comparing.
    y_predict = list(map(str, y_predict))
    print('准确率', metrics.accuracy_score(y_test, y_predict))
    print('f1:', metrics.f1_score(y_test, y_predict, average='weighted'))
    print('召回率', metrics.recall_score(y_test, y_predict, average='weighted'))

# THUCNews_x_train, THUCNews_y_train, THUCNews_x_test, THUCNews_y_test,THUCNews_vocab =  THUCNewsdata_process()
# CNN_model(THUCNews_x_train, THUCNews_y_train, THUCNews_x_test, THUCNews_y_test,THUCNews_vocab)

# TextCNN model.
# Structure: word embedding - three parallel conv+pool branches - concatenate - dense - dropout - dense.
def TextCNN_model_1(x_train_padded_seqs, y_train, x_test_padded_seqs, y_test, vocab):
    """Train and evaluate a TextCNN classifier (parallel conv branches).

    Args:
        x_train_padded_seqs / x_test_padded_seqs: integer sequences padded
            to length 200 (from THUCNewsdata_process).
        y_train / y_test: integer class labels 1..10 (y_test arrives as
            strings because the dataframe was cast to str upstream).
        vocab: word -> index mapping used to size the embedding table.

    Prints accuracy, weighted F1 and weighted recall on the test split.
    """
    main_input = Input(shape=(200,), dtype='float64')
    # BUG FIX: input_length must match the padded sequence length (200);
    # it was 50, contradicting Input(shape=(200,)) above.
    # BUG FIX: no pretrained weight matrix is supplied, so trainable=False
    # froze randomly-initialized embeddings that could never learn; train them.
    embedder = Embedding(len(vocab) + 1, 300, input_length=200, trainable=True)
    embed = embedder(main_input)
    # Three parallel branches with word-window (kernel) sizes 3, 4 and 5.
    # NOTE(review): with padding='same' every conv output has length 200, so
    # pool sizes 48/47/46 (leftovers from a maxlen-50 setup) do NOT perform
    # global max pooling — confirm whether pool_size=200 was intended.
    cnn1 = Conv1D(256, 3, padding='same', strides=1, activation='relu')(embed)
    cnn1 = MaxPooling1D(pool_size=48)(cnn1)
    cnn2 = Conv1D(256, 4, padding='same', strides=1, activation='relu')(embed)
    cnn2 = MaxPooling1D(pool_size=47)(cnn2)
    cnn3 = Conv1D(256, 5, padding='same', strides=1, activation='relu')(embed)
    cnn3 = MaxPooling1D(pool_size=46)(cnn3)
    # Concatenate the three branch outputs along the feature axis.
    cnn = concatenate([cnn1, cnn2, cnn3], axis=-1)
    flat = Flatten()(cnn)
    drop = Dropout(0.2)(flat)
    # 11 outputs: labels run 1..10 (index 0 is unused by the label scheme).
    main_output = Dense(11, activation='softmax')(drop)
    model = Model(inputs=main_input, outputs=main_output)
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])

    # CONSISTENCY FIX: use the to_categorical imported at the top of the
    # file; the keras.utils.np_utils path is removed in newer Keras releases.
    one_hot_labels = to_categorical(y_train, num_classes=11)
    model.fit(x_train_padded_seqs, one_hot_labels, batch_size=800, epochs=15)
    # Predict class probabilities, then take the most probable label.
    result = model.predict(x_test_padded_seqs)
    result_labels = np.argmax(result, axis=1)
    # y_test holds string labels, so stringify predictions before comparing.
    y_predict = list(map(str, result_labels))
    print('准确率', metrics.accuracy_score(y_test, y_predict))
    print('f1', metrics.f1_score(y_test, y_predict, average='weighted'))
    print('召回率', metrics.recall_score(y_test, y_predict, average='weighted'))

# THUCNews_x_train, THUCNews_y_train, THUCNews_x_test, THUCNews_y_test,THUCNews_vocab =  THUCNewsdata_process()
# TextCNN_model_1(THUCNews_x_train, THUCNews_y_train, THUCNews_x_test, THUCNews_y_test,THUCNews_vocab)