import numpy as np
import matplotlib.pyplot as plt
import re
import jieba # 结巴分词
# gensim用来加载预训练word vector
from gensim.models import KeyedVectors
from utils import Utils
import warnings
from sklearn.metrics import confusion_matrix, accuracy_score, precision_score, recall_score,f1_score
from sklearn.model_selection import train_test_split
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, GRU, Embedding, LSTM, Bidirectional, Dropout
from tensorflow.python.keras.preprocessing.text import Tokenizer
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.optimizers import RMSprop
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau
import tensorflow as tf
# Load the pre-trained Chinese word embeddings with gensim
# (sgns.zhihu.bigram, plain-text word2vec format; may take 1-2 minutes).
cn_model = KeyedVectors.load_word2vec_format(
    'embeddings/sgns.zhihu.bigram',
    binary=False,
    unicode_errors="ignore",
)

# Load the review corpus and the stop-word list via the project helper.
util = Utils()
stop_words_path = "./data/stop_words.txt"
data_path = "./data/online_shopping_10_cats.csv"
stop_words = util.lodeStopWords(stop_words_path)
x_data, y_data = util.load_corpus(data_path, "utf-8")

# Tokenization: strip punctuation, segment each review with jieba, drop
# stop words, and map every remaining word to its row index in the
# embedding model. train_tokens[i][j] is the embedding index of the j-th
# word of review i; rows have different lengths because reviews differ.
#
# Fixes vs. the original: the punctuation pattern is a raw string
# (non-raw "\s"/"\." escapes raise DeprecationWarning on modern Python)
# and is compiled once outside the loop; the per-word try/except KeyError
# is replaced by a single dict.get with default 0.
_punct_re = re.compile(
    r"[\s+\.\!\/_,$%^*(+\"\']+|[+——！，。？、~@#￥%……&*（）]+")
word_index = cn_model.key_to_index  # word -> embedding row index

train_tokens = []
for text in x_data:
    # Remove ASCII and full-width Chinese punctuation.
    text = _punct_re.sub("", text)
    # jieba.cut yields tokens lazily; out-of-vocabulary words map to 0.
    # NOTE(review): index 0 is also a real word in the model, so OOV words
    # are conflated with it — behaviour kept from the original code.
    tokens = [word_index.get(word, 0)
              for word in jieba.cut(text) if word not in stop_words]
    train_tokens.append(tokens)

max_tokens = 169  # every review is padded/truncated to exactly 169 tokens
# Embedding dimensionality is fixed by the pre-trained model (300 here).
embedding_dim = cn_model.vector_size

# Only the first (most frequent) 50,000 words are used; every other word
# is treated as out-of-vocabulary and mapped to index 0 downstream.
num_words = 50000
# embedding_matrix holds the first num_words rows of the pre-trained
# vectors: shape [num_words, embedding_dim] = [50000, 300], one word per
# row. KeyedVectors stores vectors in key order (cn_model.vectors[i] is
# the vector for cn_model.index_to_key[i]), so a single slice + astype
# replaces the original O(num_words) Python copy loop; astype returns a
# fresh float32 copy, so the pre-trained vectors are never aliased.
embedding_matrix = cn_model.vectors[:num_words].astype('float32')

# Pad/truncate every token list to max_tokens entries so the result is a
# rectangular numpy array. With pad_sequences' defaults, sequences shorter
# than maxlen are zero-filled at the front ('pre') and longer sequences
# are also truncated from the front.
train_pad = pad_sequences(train_tokens, maxlen=max_tokens)
# Indices beyond the first num_words embedding rows are out-of-vocabulary:
# collapse them all onto index 0.
np.place(train_pad, train_pad >= num_words, 0)

# Hold out 10% of the samples for testing; the remaining 90% are used for
# training (the original comment claiming a 20%/10% split was wrong —
# test_size=0.1 below is the actual split). random_state fixes the shuffle
# for reproducibility.
X_train, X_test, y_train, y_test = train_test_split(train_pad,
                                                    y_data,
                                                    test_size=0.1,
                                                    random_state=12)

# Bidirectional-LSTM binary classifier on top of the frozen pre-trained
# embeddings.
model = Sequential()

# First layer: embedding lookup initialised from the pre-trained matrix
# and kept frozen (trainable=False) during training.
model.add(Embedding(
    num_words,
    embedding_dim,
    weights=[embedding_matrix],
    input_length=max_tokens,
    trainable=False,
))
model.add(Bidirectional(LSTM(units=64, return_sequences=True, dropout=0.5)))
model.add(LSTM(units=16, return_sequences=False, activation="sigmoid", dropout=0.5))
# Single sigmoid unit -> probability of the positive class.
model.add(Dense(1, activation='sigmoid'))

# Optimise with Adam at a 0.001 learning rate.
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3)
model.compile(loss='binary_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
# Print the architecture (roughly 90k trainable parameters).
model.summary()

# Checkpoint that keeps only the best weights (lowest validation loss).
path_checkpoint = 'sentiment_checkpoint.keras'
checkpoint = ModelCheckpoint(filepath=path_checkpoint, monitor='val_loss',
                             verbose=1, save_weights_only=True,
                             save_best_only=True)

# Resume from a previous checkpoint when one exists; a failed load is only
# reported, never fatal (best-effort behaviour kept on purpose).
try:
    model.load_weights(path_checkpoint)
except Exception as e:
    print(e)

# Stop training once validation loss has not improved for 5 epochs.
earlystopping = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
# Shrink the learning rate 10x whenever validation loss stalls
# (patience=0: react on the very first non-improving epoch).
lr_reduction = ReduceLROnPlateau(monitor='val_loss',
                                 factor=0.1, min_lr=1e-8, patience=0,
                                 verbose=1)

# Callbacks passed to model.fit.
callbacks = [earlystopping, checkpoint, lr_reduction]

# Keras validation data requires arrays (plain Python lists are rejected).
y_train = np.array(y_train)
y_test = np.array(y_test)

# Train, validating on the held-out test split after every epoch.
history = model.fit(X_train, y_train,
                    epochs=20,
                    validation_data=(X_test, y_test),
                    batch_size=128,
                    callbacks=callbacks)

# Persist the full model (architecture + weights) in HDF5 format.
model.save('./save_weights/lstm02.h5')

# Visualise training: loss and accuracy curves for both the training and
# the validation sets, one figure each.
history_dict = history.history
train_loss = history_dict["loss"]          # per-epoch training loss
train_accuracy = history_dict["accuracy"]  # per-epoch training accuracy
val_loss = history_dict["val_loss"]        # per-epoch validation loss
val_accuracy = history_dict["val_accuracy"]  # per-epoch validation accuracy
epochs = len(train_loss)
xs = range(epochs)

# Loss curves.
plt.figure()
plt.plot(xs, train_loss, label='train_loss')
plt.plot(xs, val_loss, label='val_loss')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('loss')

# Accuracy curves.
plt.figure()
plt.plot(xs, train_accuracy, label='train_accuracy')
plt.plot(xs, val_accuracy, label='val_accuracy')
plt.legend()
plt.xlabel('epochs')
plt.ylabel('accuracy')
plt.show()

# Evaluate on the test split: threshold the sigmoid outputs at 0.5 and
# report the confusion matrix plus standard binary metrics.
#
# model.predict returns an (n_samples, 1) float array of probabilities;
# the single vectorized line below replaces the original in-place
# threshold assignments plus a manual element-copy loop (which also
# contained a duplicated `index = 0` statement). Behaviour is identical:
# probabilities >= 0.5 become 1, everything else 0, flattened to a 1-D
# int array.
y_pred = (model.predict(X_test) >= 0.5).astype(int).ravel()

# Confusion matrix, accuracy, precision, recall and F1 on the test split.
cm = confusion_matrix(y_test, y_pred)
print("混淆矩阵:", cm)
print("准确率:", accuracy_score(y_test, y_pred))
print("精确率:", precision_score(y_test, y_pred, average='binary'))
print("召回率:", recall_score(y_test, y_pred, average='binary'))
print("F1分数:", f1_score(y_test, y_pred, average='binary'))