# coding=utf-8
import pickle
import re
import jieba as jb
from keras.layers import Input, Embedding, LSTM, Dense, Lambda, Activation, TimeDistributed, SpatialDropout1D, Flatten, RepeatVector
from keras.layers.wrappers import Bidirectional
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
import time
import pandas as pd
import numpy as np
import pymysql

# Keep only the 50,000 most frequent words (the tokenizer /
# texts_to_matrix only uses the first MAX_NB_WORDS columns).
MAX_NB_WORDS = 50000
# Maximum length (in tokens) of each padded cut_review sequence.
MAX_SEQUENCE_LENGTH = 250
# Dimensionality of the Embedding layer's output vectors.
EMBEDDING_DIM = 100

# Pattern matching every character that is NOT an ASCII letter, a digit,
# or a CJK ideograph (U+4E00-U+9FA5).  Compiled once at import time
# instead of on every call.
_NON_WORD_RE = re.compile(u"[^a-zA-Z0-9\u4E00-\u9FA5]")


def remove_punctuation(line):
    """Strip every symbol except letters, digits and Chinese characters.

    Args:
        line: Any value; it is coerced with ``str()`` first.

    Returns:
        str: The cleaned string, or ``''`` if the input is blank or
        whitespace-only.
    """
    line = str(line)
    if line.strip() == '':
        return ''
    return _NON_WORD_RE.sub('', line)

# Load the stop-word list used to filter jieba tokens.
def stopwordslist(filepath):
    """Load a stop-word list, one word per line.

    Args:
        filepath: Path to a UTF-8 text file with one stop word per line.

    Returns:
        list[str]: The stripped lines of the file (blank lines become
        ``''`` entries, matching the original behavior).

    The file is opened with ``with`` so the handle is always closed; the
    previous version leaked it.
    """
    with open(filepath, 'r', encoding='utf-8') as fh:
        return [line.strip() for line in fh]


# Define the bidirectional LSTM sentiment model.
def define_BiLSTM():
    """Build and compile the 2-class Bi-LSTM sentiment classifier.

    Architecture: Embedding -> SpatialDropout1D -> Bi-LSTM (concat)
    -> Flatten -> Dense(2).  This must stay identical to the
    architecture the saved weights (weights.best_LSTM_10.4.hdf5) were
    trained with, since the caller only calls ``load_weights``.

    Returns:
        keras.models.Sequential: The compiled model.
    """
    model = Sequential()
    # Bug fix: input_length was hard-coded to 250; use the shared
    # constant so it stays in sync with pad_sequences(maxlen=...).
    model.add(Embedding(MAX_NB_WORDS, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH))
    model.add(SpatialDropout1D(0.2))
    # Bidirectional LSTM; forward and backward outputs are concatenated.
    model.add(Bidirectional(LSTM(100, dropout=0.2, recurrent_dropout=0.2, return_sequences=True), merge_mode='concat'))
    model.add(Flatten())
    # Two-class output head.  NOTE(review): 'sigmoid' together with
    # categorical_crossentropy is unconventional (softmax is the usual
    # choice), but it is kept as-is because the saved weights were
    # trained this way and argmax-based prediction is unaffected.
    model.add(Dense(2, activation='sigmoid'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

# Analyze one comment's sentiment and write the result to the database.
def analysis_comment(a):
    """Classify a comment as positive/negative and store it in MySQL.

    Args:
        a: Raw comment text (any value; coerced to str during cleaning).

    Side effects:
        Rebuilds the model and reloads weights, tokenizer and stop-word
        list from disk on every call, prints the result, and inserts a
        row into ``data.comment_sentiment``.
    """
    model_LSTM = define_BiLSTM()
    model_LSTM.load_weights("weights.best_LSTM_10.4.hdf5")
    stopwords = stopwordslist("./stoplist.txt")
    # Clean the text, segment it with jieba, and drop stop words.
    cleaned = remove_punctuation(a)
    tokens = [w for w in jb.cut(cleaned) if w not in stopwords]
    txt = [" ".join(tokens)]
    # Restore the tokenizer fitted at training time.
    with open('tokenizer.pickle', 'rb') as handle:
        tokenizer = pickle.load(handle)
    seq = tokenizer.texts_to_sequences(txt)
    padded = pad_sequences(seq, maxlen=MAX_SEQUENCE_LENGTH)
    pred = model_LSTM.predict(padded)
    # Class 0 = positive ('好评'), anything else = negative ('差评').
    cat_id = pred.argmax(axis=1)[0]
    senti_analysis_result = '好评' if cat_id == 0 else '差评'
    analysis_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
    print("情感分析结果:", senti_analysis_result)
    # NOTE(review): database credentials are hard-coded here; they
    # should be moved to configuration / environment variables.
    conn = pymysql.connect(host='47.100.201.211', port=3306, user='root', password='iyGfLR64Ne4Ddhk7',
                           database='data', charset='utf8')
    cursor = conn.cursor()
    try:
        conn.ping(reconnect=True)
        infs = [a, senti_analysis_result, analysis_time]
        # Parameterized query, safe against SQL injection.
        query = 'insert into data.comment_sentiment values (%s,%s,%s)'
        cursor.execute(query, infs)
        conn.commit()
    except Exception as e:
        # Roll back the failed transaction instead of leaving it open.
        conn.rollback()
        print(e)
    else:
        # Bug fix: previously this printed even when the insert failed.
        print("Write to MySQL successfully!")
    finally:
        # Always release the cursor and the connection.
        cursor.close()
        conn.close()

if __name__ == '__main__':
    # Interactive loop: read comments from stdin until EOF / Ctrl-C.
    while True:
        try:
            a = input("请输入评论内容: ")
            print('正在进行情感分析...')
            analysis_comment(a)
        except KeyError as err:
            # Raised when a word is missing from the tokenizer vocabulary.
            print("您输入的句子有汉字不在词汇表中，请重新输入！")
            print("不在词汇表中的单词为：%s." % err)
            continue
        except (EOFError, KeyboardInterrupt):
            # Bug fix: exit cleanly on Ctrl-D / Ctrl-C instead of dying
            # with a traceback.
            break