import time
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
import sys

# 将自定义模块所在的目录加入到搜索目录中
sys.path.append('../lib/')
from lib import text_classification_utils as utils
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import recall_score, precision_score, f1_score

print("数据集处理中")
# Load the THUCNews corpus and keep a working copy with two columns:
# 'words' (raw text) and 'sentiment' (category name, to be label-encoded).
data = utils.load_thucnews()
THUCNews_data = pd.DataFrame(data, columns=["words", "sentiment"])
THUCNews_copy = THUCNews_data.copy()

# Map Chinese category names to integer class labels 1-9; anything
# unrecognised falls through to 10.
# BUG FIX: the original loop mapped both '游戏' and '科技' to 8, silently
# merging the two categories and leaving label 9 unused; '科技' is now 9.
_THUC_LABELS = {
    '体育': 1,
    '娱乐': 2,
    '家具': 3,
    '房产': 4,
    '教育': 5,
    '时尚': 6,
    '时政': 7,
    '游戏': 8,
    '科技': 9,
}
# Vectorized .map/.fillna replaces the per-row loop that used chained
# indexing (THUCNews_copy['sentiment'][i] = res), which triggers pandas'
# SettingWithCopyWarning and is not guaranteed to write through.
THUCNews_copy['sentiment'] = (
    THUCNews_copy['sentiment'].map(_THUC_LABELS).fillna(10).astype('int')
)
# TfidfVectorizer: TF-IDF weighted token counts over the corpus.
# Raw string for token_pattern avoids invalid-escape-sequence warnings
# ('\[' and '\w' are regex escapes, not string escapes).
THUC_vectorizer = TfidfVectorizer(token_pattern=r'\[?\w+\]?', stop_words=utils.chinese_stopwords)
THUCNews_words = THUC_vectorizer.fit_transform(THUCNews_copy["words"])
THUCNews_sentiment = THUCNews_copy["sentiment"]

# Load the IMDB corpus (English, binary sentiment) and label-encode it:
# 'positive' -> 1, everything else -> 2.
IMDB_data = utils.load_IMDBDatas()
IMDB_data.columns = ['words', 'sentiment']
IMDB_copy = IMDB_data.copy()
# Vectorized mapping replaces the per-row loop that used chained indexing
# (IMDB_copy['sentiment'][i] = res), which triggers pandas'
# SettingWithCopyWarning and is not guaranteed to write through.
IMDB_copy['sentiment'] = IMDB_copy['sentiment'].map(
    lambda s: 1 if s == 'positive' else 2
).astype('int')
# TF-IDF features; raw string for token_pattern avoids
# invalid-escape-sequence warnings.
IMDB_vectorizer = TfidfVectorizer(token_pattern=r'\[?\w+\]?', stop_words=utils.english_stopwords)
IMDB_words = IMDB_vectorizer.fit_transform(IMDB_copy["words"])
IMDB_sentiment = IMDB_copy["sentiment"]

# Hold out a test split for each corpus (sklearn default: 75% train / 25% test).
# NOTE(review): no random_state is set, so the splits differ between runs.
THUCNews_train_words, THUCNews_test_words, THUCNews_train_sentiment, THUCNews_test_sentiment = train_test_split(
    THUCNews_words, THUCNews_sentiment)

IMDBData_train_words, IMDBData_test_words, IMDBData_train_sentiment, IMDBData_test_sentiment = train_test_split(
    IMDB_words, IMDB_sentiment)
print("数据集处理完成")

print("linear_model模型")
from sklearn import linear_model

### THUCNews dataset (multi-class; average='micro' aggregates over classes)
# BUG FIX: the original fitted on the train split and then ran
# cross_val_predict on the *test* split — cross_val_predict clones and
# refits the estimator, so the fitted model and the training data were both
# discarded.  Standard held-out evaluation instead: fit(train), predict(test).
lr_THUCNews = linear_model.LogisticRegression()
lr_THUCNews.fit(THUCNews_train_words, THUCNews_train_sentiment)
lr_THUCNews_pred = lr_THUCNews.predict(THUCNews_test_words)
lr_precision_THUCNews = precision_score(THUCNews_test_sentiment, lr_THUCNews_pred, average='micro')
lr_recall_THUCNews = recall_score(THUCNews_test_sentiment, lr_THUCNews_pred, average='micro')
lr_f1_THUCNews = f1_score(THUCNews_test_sentiment, lr_THUCNews_pred, average='micro')

### IMDB dataset (binary; default average='binary', pos_label=1)
lr_IMDBData = linear_model.LogisticRegression()
lr_IMDBData.fit(IMDBData_train_words, IMDBData_train_sentiment)
lr_IMDBData_pred = lr_IMDBData.predict(IMDBData_test_words)
lr_precision_IMDBData = precision_score(IMDBData_test_sentiment, lr_IMDBData_pred)
lr_recall_IMDBData = recall_score(IMDBData_test_sentiment, lr_IMDBData_pred)
lr_f1_IMDBData = f1_score(IMDBData_test_sentiment, lr_IMDBData_pred)

print("MultinomialNB模型")
from sklearn.naive_bayes import MultinomialNB

# THUCNews dataset (multi-class; average='micro' aggregates over classes)
# BUG FIX: cross_val_predict on the test split clones/refits the estimator,
# discarding the fitted model and the training data.  Evaluate the standard
# way: fit on the train split, predict the held-out test split.
bayes_THUCNews = MultinomialNB()
bayes_THUCNews.fit(THUCNews_train_words, THUCNews_train_sentiment)
bayes_THUCNews_pred = bayes_THUCNews.predict(THUCNews_test_words)
bayes_precision_THUCNews = precision_score(THUCNews_test_sentiment, bayes_THUCNews_pred, average='micro')
bayes_recall_THUCNews = recall_score(THUCNews_test_sentiment, bayes_THUCNews_pred, average='micro')
bayes_f1_THUCNews = f1_score(THUCNews_test_sentiment, bayes_THUCNews_pred, average='micro')

### IMDB dataset (binary; default average='binary')
bayes_IMDBData = MultinomialNB()
bayes_IMDBData.fit(IMDBData_train_words, IMDBData_train_sentiment)
bayes_IMDBData_pred = bayes_IMDBData.predict(IMDBData_test_words)
bayes_precision_IMDBData = precision_score(IMDBData_test_sentiment, bayes_IMDBData_pred)
bayes_recall_IMDBData = recall_score(IMDBData_test_sentiment, bayes_IMDBData_pred)
bayes_f1_IMDBData = f1_score(IMDBData_test_sentiment, bayes_IMDBData_pred)

print("SVC模型")
from sklearn.svm import SVC

# THUCNews dataset (multi-class; average='micro' aggregates over classes)
# BUG FIX: cross_val_predict on the test split clones/refits the estimator,
# discarding the fitted model and the training data.  Evaluate the standard
# way: fit on the train split, predict the held-out test split.
svc_THUCNews = SVC(kernel='rbf')
svc_THUCNews.fit(THUCNews_train_words, THUCNews_train_sentiment)
svc_THUCNews_pred = svc_THUCNews.predict(THUCNews_test_words)
svc_precision_THUCNews = precision_score(THUCNews_test_sentiment, svc_THUCNews_pred, average='micro')
svc_recall_THUCNews = recall_score(THUCNews_test_sentiment, svc_THUCNews_pred, average='micro')
svc_f1_THUCNews = f1_score(THUCNews_test_sentiment, svc_THUCNews_pred, average='micro')
# IMDB dataset (binary; default average='binary')
svc_IMDBData = SVC(kernel='rbf')
svc_IMDBData.fit(IMDBData_train_words, IMDBData_train_sentiment)
svc_IMDBData_pred = svc_IMDBData.predict(IMDBData_test_words)
svc_precision_IMDBData = precision_score(IMDBData_test_sentiment, svc_IMDBData_pred)
svc_recall_IMDBData = recall_score(IMDBData_test_sentiment, svc_IMDBData_pred)
svc_f1_IMDBData = f1_score(IMDBData_test_sentiment, svc_IMDBData_pred)

print("GBDT模型")
from sklearn.ensemble import GradientBoostingClassifier

# THUCNews dataset (multi-class; average='micro' aggregates over classes)
# BUG FIX: cross_val_predict on the test split clones/refits the estimator,
# discarding the fitted model and the training data.  Evaluate the standard
# way: fit on the train split, predict the held-out test split.
gbdt_THUCNews = GradientBoostingClassifier()
gbdt_THUCNews.fit(THUCNews_train_words, THUCNews_train_sentiment)
gbdt_THUCNews_pred = gbdt_THUCNews.predict(THUCNews_test_words)
gbdt_precision_THUCNews = precision_score(THUCNews_test_sentiment, gbdt_THUCNews_pred, average='micro')
gbdt_recall_THUCNews = recall_score(THUCNews_test_sentiment, gbdt_THUCNews_pred, average='micro')
gbdt_f1_THUCNews = f1_score(THUCNews_test_sentiment, gbdt_THUCNews_pred, average='micro')

# IMDB dataset (binary; default average='binary')
gbdt_IMDBData = GradientBoostingClassifier()
gbdt_IMDBData.fit(IMDBData_train_words, IMDBData_train_sentiment)
gbdt_IMDBData_pred = gbdt_IMDBData.predict(IMDBData_test_words)
gbdt_precision_IMDBData = precision_score(IMDBData_test_sentiment, gbdt_IMDBData_pred)
gbdt_recall_IMDBData = recall_score(IMDBData_test_sentiment, gbdt_IMDBData_pred)
gbdt_f1_IMDBData = f1_score(IMDBData_test_sentiment, gbdt_IMDBData_pred)

print("SGD模型")
from sklearn.linear_model import SGDClassifier

# THUCNews dataset (multi-class; average='micro' aggregates over classes)
# BUG FIX: cross_val_predict on the test split clones/refits the estimator,
# discarding the fitted model and the training data.  Evaluate the standard
# way: fit on the train split, predict the held-out test split.
sgd_THUCNews = SGDClassifier(random_state=1234)
sgd_THUCNews.fit(THUCNews_train_words, THUCNews_train_sentiment)
sgd_THUCNews_pred = sgd_THUCNews.predict(THUCNews_test_words)
sgd_precision_THUCNews = precision_score(THUCNews_test_sentiment, sgd_THUCNews_pred, average='micro')
sgd_recall_THUCNews = recall_score(THUCNews_test_sentiment, sgd_THUCNews_pred, average='micro')
sgd_f1_THUCNews = f1_score(THUCNews_test_sentiment, sgd_THUCNews_pred, average='micro')
# IMDB dataset (binary; default average='binary')
sgd_IMDBData = SGDClassifier(random_state=1234)
sgd_IMDBData.fit(IMDBData_train_words, IMDBData_train_sentiment)
sgd_IMDBData_pred = sgd_IMDBData.predict(IMDBData_test_words)
sgd_precision_IMDBData = precision_score(IMDBData_test_sentiment, sgd_IMDBData_pred)
sgd_recall_IMDBData = recall_score(IMDBData_test_sentiment, sgd_IMDBData_pred)
sgd_f1_IMDBData = f1_score(IMDBData_test_sentiment, sgd_IMDBData_pred)

print("RandomForest模型")
from sklearn.ensemble import RandomForestClassifier

# THUCNews dataset (multi-class; average='micro' aggregates over classes)
# BUG FIX: cross_val_predict on the test split clones/refits the estimator,
# discarding the fitted model and the training data.  Evaluate the standard
# way: fit on the train split, predict the held-out test split.
rfc_THUCNews = RandomForestClassifier()
rfc_THUCNews.fit(THUCNews_train_words, THUCNews_train_sentiment)
rfc_THUCNews_pred = rfc_THUCNews.predict(THUCNews_test_words)
rfc_precision_THUCNews = precision_score(THUCNews_test_sentiment, rfc_THUCNews_pred, average='micro')
rfc_recall_THUCNews = recall_score(THUCNews_test_sentiment, rfc_THUCNews_pred, average='micro')
rfc_f1_THUCNews = f1_score(THUCNews_test_sentiment, rfc_THUCNews_pred, average='micro')
# IMDB dataset (binary; default average='binary')
rfc_IMDBData = RandomForestClassifier()
rfc_IMDBData.fit(IMDBData_train_words, IMDBData_train_sentiment)
rfc_IMDBData_pred = rfc_IMDBData.predict(IMDBData_test_words)
rfc_precision_IMDBData = precision_score(IMDBData_test_sentiment, rfc_IMDBData_pred)
rfc_recall_IMDBData = recall_score(IMDBData_test_sentiment, rfc_IMDBData_pred)
rfc_f1_IMDBData = f1_score(IMDBData_test_sentiment, rfc_IMDBData_pred)

print("CNN模型")
import pandas as pd
import jieba
import keras
from keras.layers.merge import concatenate
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers.embeddings import Embedding
from keras.layers import Conv1D, MaxPooling1D, Flatten, Dropout, Dense, Input
from keras.models import Model
from sklearn.model_selection import train_test_split
from sklearn import metrics
import numpy as np
from keras.models import Sequential
from keras.layers import BatchNormalization


# THUCNews数据集
# 数据预处理
def THUCNewsdata_process(max_len=200):
    """Tokenize the THUCNews corpus and return padded train/test id sequences.

    max_len: fixed sequence length; longer sentences are truncated and
    shorter ones are left-padded with zeros.
    Returns (x_train_padded, y_train, x_test_padded, y_test, vocab).
    """
    frame = THUCNews_copy.astype('str')
    # Segment each sentence into a list of words with jieba.
    frame['words'] = frame['words'].apply(lambda text: list(jieba.cut(text)))
    # Build the vocabulary; more frequent words receive smaller integer ids.
    tok = Tokenizer()
    tok.fit_on_texts(frame['words'])
    vocab = tok.word_index
    # NOTE(review): the tokenizer is fitted on the full corpus before the
    # split, so test-set vocabulary is visible at training time — confirm
    # this is intended.
    x_train, x_test, y_train, y_test = train_test_split(
        frame['words'], frame['sentiment'], test_size=0.1)
    # Convert word lists to integer id sequences for each split.
    train_ids = tok.texts_to_sequences(x_train)
    test_ids = tok.texts_to_sequences(x_test)
    # Pad/truncate every sequence to exactly max_len tokens (default 200).
    x_train_padded_seqs = pad_sequences(train_ids, maxlen=max_len)
    x_test_padded_seqs = pad_sequences(test_ids, maxlen=max_len)
    return x_train_padded_seqs, y_train, x_test_padded_seqs, y_test, vocab


# 构建CNN分类模型(LeNet-5)
# 模型结构：嵌入-卷积池化*2-dropout-BN-全连接-dropout-全连接
def CNN_model_THUCNews(x_train_padded_seqs, y_train, x_test_padded_seqs, y_test, vocab):
    """Train a LeNet-5-style 1D CNN on THUCNews and return (precision, recall, f1).

    Architecture: embedding -> (conv + max-pool) * 2 -> conv -> flatten ->
    dropout -> batch-norm -> dense -> dropout -> 11-way softmax (class
    indices 0-10; upstream labels are 1-10, so index 0 is unused).
    """
    model = Sequential()
    # Embedding maps each word id (0..len(vocab)) to a 300-dim dense vector.
    model.add(Embedding(len(vocab) + 1, 300, input_length=200))
    model.add(Conv1D(256, 5, padding='same'))
    model.add(MaxPooling1D(11, 11, padding='same'))
    model.add(Conv1D(128, 5, padding='same'))
    model.add(MaxPooling1D(11, 11, padding='same'))
    model.add(Conv1D(64, 11, padding='same'))
    model.add(Flatten())
    model.add(Dropout(0.1))
    model.add(BatchNormalization())  # batch normalization layer
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(11, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # One-hot encode the integer labels for categorical cross-entropy.
    one_hot_labels = keras.utils.np_utils.to_categorical(y_train, num_classes=11)
    model.fit(x_train_padded_seqs, one_hot_labels, epochs=5, batch_size=800)
    predict_x = model.predict(x_test_padded_seqs)
    y_predict = np.argmax(predict_x, axis=1)
    # y_test holds string labels (the frame was cast with astype('str')),
    # so stringify the predicted class indices before comparison.
    y_predict = list(map(str, y_predict))
    # BUG FIX: this value is reported as "precision" in the summary table,
    # but the original computed accuracy_score; use precision_score with the
    # same weighted averaging as recall/f1.
    cnn_precision_THUCNews = metrics.precision_score(y_test, y_predict, average='weighted')
    cnn_recall_THUCNews = metrics.recall_score(y_test, y_predict, average='weighted')
    cnn_f1_THUCNews = metrics.f1_score(y_test, y_predict, average='weighted')
    return cnn_precision_THUCNews, cnn_recall_THUCNews, cnn_f1_THUCNews


# Preprocess THUCNews once, then train and evaluate the CNN on that split.
THUCNews_x_train, THUCNews_y_train, THUCNews_x_test, THUCNews_y_test, THUCNews_vocab = THUCNewsdata_process()
cnn_precision_THUCNews, cnn_recall_THUCNews, cnn_f1_THUCNews = CNN_model_THUCNews(THUCNews_x_train, THUCNews_y_train,
                                                                                  THUCNews_x_test, THUCNews_y_test,
                                                                                  THUCNews_vocab)


# IMDB数据集
# 数据预处理
def IMDBdata_process(max_len=200):
    """Tokenize the IMDB corpus and return padded train/test id sequences.

    max_len: fixed sequence length; longer reviews are truncated and
    shorter ones are left-padded with zeros.
    Returns (x_train_padded, y_train, x_test_padded, y_test, vocab).
    """
    frame = IMDB_copy.astype('str')
    # NOTE(review): jieba is a Chinese segmenter being applied to English
    # reviews here — confirm this tokenization is intended.
    frame['words'] = frame['words'].apply(lambda text: list(jieba.cut(text)))
    # Build the vocabulary; more frequent words receive smaller integer ids.
    tok = Tokenizer()
    tok.fit_on_texts(frame['words'])
    vocab = tok.word_index
    x_train, x_test, y_train, y_test = train_test_split(
        frame['words'], frame['sentiment'], test_size=0.1)
    # Convert word lists to integer id sequences for each split.
    train_ids = tok.texts_to_sequences(x_train)
    test_ids = tok.texts_to_sequences(x_test)
    # Pad/truncate every sequence to exactly max_len tokens (default 200).
    x_train_padded_seqs = pad_sequences(train_ids, maxlen=max_len)
    x_test_padded_seqs = pad_sequences(test_ids, maxlen=max_len)
    return x_train_padded_seqs, y_train, x_test_padded_seqs, y_test, vocab


# 构建TextCNN模型
def CNN_model_IMDB(x_train, y_train, x_test, y_test):
    """Train a TextCNN on the IMDB split and return (precision, recall, f1).

    Three parallel Conv1D branches (kernel sizes 3/4/5) over a shared
    embedding are max-pooled, concatenated and fed through dropout into a
    3-way softmax (upstream labels are 1/2, so class index 0 is unused).
    Reads the module-level IMDB_vocab for the embedding input dimension.
    """
    main_input = Input(shape=(200,), dtype='float64')
    # NOTE(review): trainable=False freezes a *randomly initialised*
    # embedding — no pretrained vectors are loaded here; confirm intended.
    # BUG FIX: input_length must match the 200-token padded sequences; the
    # original said 50, conflicting with Input(shape=(200,)) and the
    # pad_sequences(maxlen=200) done upstream.
    embedder = Embedding(len(IMDB_vocab) + 1, 300, input_length=200, trainable=False)
    embed = embedder(main_input)
    # Convolution + pooling branches with kernel sizes 3, 4 and 5.
    cnn1 = Conv1D(256, 3, padding='same', strides=1, activation='relu')(embed)
    cnn1 = MaxPooling1D(pool_size=48)(cnn1)
    cnn2 = Conv1D(256, 4, padding='same', strides=1, activation='relu')(embed)
    cnn2 = MaxPooling1D(pool_size=47)(cnn2)
    cnn3 = Conv1D(256, 5, padding='same', strides=1, activation='relu')(embed)
    cnn3 = MaxPooling1D(pool_size=46)(cnn3)
    # Concatenate the three branch outputs along the feature axis.
    cnn = concatenate([cnn1, cnn2, cnn3], axis=-1)
    flat = Flatten()(cnn)
    drop = Dropout(0.2)(flat)  # dropout before the classifier to curb overfitting
    main_output = Dense(3, activation='softmax')(drop)
    model = Model(inputs=main_input, outputs=main_output)
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # One-hot encode the integer labels for categorical cross-entropy.
    one_hot_labels = keras.utils.np_utils.to_categorical(y_train, num_classes=3)
    model.fit(x_train, one_hot_labels, batch_size=800, epochs=5)
    result = model.predict(x_test)  # per-class probabilities
    result_labels = np.argmax(result, axis=1)  # index of the most likely class
    # y_test holds string labels, so stringify predicted indices to compare.
    y_predict = list(map(str, result_labels))
    # BUG FIX: this value is reported as "precision" in the summary table,
    # but the original computed accuracy_score; use precision_score with the
    # same weighted averaging as recall/f1.
    cnn_precision_IMDB = metrics.precision_score(y_test, y_predict, average='weighted')
    cnn_recall_IMDB = metrics.recall_score(y_test, y_predict, average='weighted')
    cnn_f1_IMDB = metrics.f1_score(y_test, y_predict, average='weighted')
    return cnn_precision_IMDB, cnn_recall_IMDB, cnn_f1_IMDB


# Preprocess IMDB once (also defines IMDB_vocab, read inside CNN_model_IMDB),
# then train and evaluate the TextCNN on that split.
IMDB_x_train, IMDB_y_train, IMDB_x_test, IMDB_y_test, IMDB_vocab = IMDBdata_process()
cnn_precision_IMDB, cnn_recall_IMDB, cnn_f1_IMDB = CNN_model_IMDB(IMDB_x_train, IMDB_y_train, IMDB_x_test, IMDB_y_test)

print("输出excel对比表格")
# Collect per-model scores into parallel lists; order matches 'modelname'.
THUCNews_precision_list = [lr_precision_THUCNews, bayes_precision_THUCNews, svc_precision_THUCNews,
                           gbdt_precision_THUCNews, sgd_precision_THUCNews, rfc_precision_THUCNews,
                           cnn_precision_THUCNews]
THUCNews_recall_list = [lr_recall_THUCNews, bayes_recall_THUCNews, svc_recall_THUCNews, gbdt_recall_THUCNews,
                        sgd_recall_THUCNews, rfc_recall_THUCNews, cnn_recall_THUCNews]
# BUG FIX: both f1 lists previously inserted the SGD *recall* value
# (sgd_recall_*) in the SGD slot instead of the f1 value (sgd_f1_*).
THUCNews_f1_list = [lr_f1_THUCNews, bayes_f1_THUCNews, svc_f1_THUCNews, gbdt_f1_THUCNews, sgd_f1_THUCNews,
                    rfc_f1_THUCNews, cnn_f1_THUCNews]
IMDBData_precision_list = [lr_precision_IMDBData, bayes_precision_IMDBData, svc_precision_IMDBData,
                           gbdt_precision_IMDBData, sgd_precision_IMDBData, rfc_precision_IMDBData, cnn_precision_IMDB]
IMDBData_recall_list = [lr_recall_IMDBData, bayes_recall_IMDBData, svc_recall_IMDBData, gbdt_recall_IMDBData,
                        sgd_recall_IMDBData, rfc_recall_IMDBData, cnn_recall_IMDB]
IMDBData_f1_list = [lr_f1_IMDBData, bayes_f1_IMDBData, svc_f1_IMDBData, gbdt_f1_IMDBData, sgd_f1_IMDBData,
                    rfc_f1_IMDBData, cnn_f1_IMDB]
# One row per model, one column per (dataset, metric) pair.
# ('SGDCLassifier' display-label typo fixed to 'SGDClassifier'.)
score = pd.DataFrame({'modelname': ['LogisticRegression', 'MultinomialNB', 'SVC', 'GradientBoostingClassifier',
                                    'SGDClassifier', 'RandomForestClassifier', 'CNN'],
                      'THUCNews_precision': THUCNews_precision_list,
                      'THUCNews_recall': THUCNews_recall_list,
                      'THUCNews_f1': THUCNews_f1_list,
                      'IMDBData_precision': IMDBData_precision_list,
                      'IMDBData_recall': IMDBData_recall_list,
                      'IMDBData_f1': IMDBData_f1_list
                      })
score.to_excel('../db/text_classification_db/score.xlsx')