import numpy as np
import pandas as pd
import jieba
import gensim
import matplotlib.pyplot as plt
import re
from gensim.models import KeyedVectors
import warnings
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense, GRU, Embedding, LSTM, Bidirectional
from tensorflow.python.keras.preprocessing.text import Tokenizer
from tensorflow.python.keras.preprocessing.sequence import pad_sequences
from tensorflow.python.keras.optimizers import RMSprop
from tensorflow.python.keras.optimizers import Adam
from tensorflow.python.keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau
from tensorflow.python.keras.backend import set_session
from concurrent.futures import ThreadPoolExecutor

import tensorflow as tf
import pymysql
import datetime
import traceback
from flask import Flask
from threading import Thread

import uuid

global sess
global graph
# TF1-style shared graph/session: Flask handles requests on worker threads,
# so predictions must re-enter this graph/session explicitly (see
# predict_sentiment).
graph = tf.get_default_graph()
sess = tf.Session()
warnings.filterwarnings("ignore")

# Thread pool used by /train to fan out per-product analysis jobs.
executor = ThreadPoolExecutor(8)

# Flask application serving the analysis endpoints.
app = Flask("my_flask")
# Running counters of positive / negative reviews for the product being
# analysed.  NOTE(review): shared mutable globals, also written from the
# executor's worker threads — not thread-safe; confirm single-writer usage.
sum_number1 = 0
sum_number2 = 0
# Only the top 50000 pretrained word vectors are used as the vocabulary.
num_words = 50000
# Fixed input sequence length the serving model was trained with.
max_tokens = 226
# Load the pretrained Chinese word vectors (Zhihu bigram corpus).
cn_model = KeyedVectors.load_word2vec_format("Chinese-Word-Vectors-master/sgns.zhihu.bigram"
                                             , binary=False)
# Embedding dimensionality, probed from an arbitrary in-vocabulary word.
embedding_dim = cn_model['山东大学'].shape[0]
# Embedding matrix of shape (num_words, embedding_dim): row i holds the
# pretrained vector for vocabulary index i (index-aligned with cn_model).
embedding_matrix = np.zeros((num_words, embedding_dim))
for i in range(num_words):
    embedding_matrix[i, :] = cn_model[cn_model.index2word[i]]
embedding_matrix = embedding_matrix.astype('float32')
# Build the serving model: frozen pretrained embedding -> bi-LSTM -> LSTM
# -> sigmoid (binary sentiment score), then load the trained checkpoint.
model = Sequential()
model.add(Embedding(num_words, embedding_dim, weights=[embedding_matrix],
                    input_length=max_tokens, trainable=False))
model.add(Bidirectional(LSTM(units=32, return_sequences=True)))
model.add(LSTM(units=16, return_sequences=False))

model.add(Dense(1, activation='sigmoid'))
optimizer = Adam(lr=1e-3)
model.compile(loss='binary_crossentropy',
              optimizer=optimizer,
              metrics=['accuracy'])
set_session(sess)
model.load_weights("sentiment_checkpoint.keras")


def predict_sentiment(text, guid):
    """Score *text* with the shared model and persist it to disk by class.

    Strips punctuation, segments with jieba, maps words to pretrained
    word-vector indices (OOV -> 0), pads to ``max_tokens`` and predicts.
    Texts scoring >= 0.5 are written to ``MessageFile/pos/<guid>``,
    the rest to ``MessageFile/neg/<guid>``, and the global counters
    ``sum_number1`` / ``sum_number2`` are incremented accordingly.

    :param text: raw review text (Chinese)
    :param guid: unique id used as the output file name
    """
    # Strip ASCII and full-width punctuation before segmentation.
    text = re.sub(r"[\s+\.\!\/_,$%^&*(+\"\']+|[+——！，。？、~@#￥%……&*（）]", "", text)
    # jieba.cut yields a generator; map each word to its embedding index.
    token_ids = []
    for word in jieba.cut(text):
        try:
            token_ids.append(cn_model.vocab[word].index)
        except KeyError:
            # out-of-vocabulary words map to index 0
            token_ids.append(0)
    tokens_pad = pad_sequences([token_ids], maxlen=max_tokens,
                               padding='pre', truncating='pre')
    # Reload the checkpoint so predictions pick up weights saved by /train.
    set_session(sess)
    model.load_weights("sentiment_checkpoint.keras")
    # TF1 threading: bind the shared graph/session on this worker thread.
    with graph.as_default():
        set_session(sess)
        result = model.predict(x=tokens_pad)
    coef = result[0][0]
    global sum_number1, sum_number2
    # NOTE(review): counters are shared globals also mutated from other
    # worker threads — not thread-safe; confirm intended usage.
    if coef >= 0.5:
        sum_number1 += 1
        print('是一例正面评价', 'output=%.2f' % coef)
        sub_dir = "pos/"
    else:
        sum_number2 += 1
        print('是一例负面评价', 'output=%.2f' % coef)
        sub_dir = "neg/"
    # Fix: the original opened the file without ever closing it (leak);
    # the two branches also duplicated the write logic.
    with open("MessageFile/" + sub_dir + str(guid), "w") as f:
        f.write(text)


def Form_es_to_excel(index_url, size, goodsId):
    """Fetch every comment for *goodsId* from Elasticsearch and run
    sentiment analysis on each one.

    NOTE(review): this function is redefined later in this file (with a
    different log prefix); the later definition shadows this one at import
    time — one of the two copies should be removed.

    :param index_url: ES index name to search
    :param size: page size for the initial/scroll query
    :param goodsId: product id matched against the ``goodsId`` field
    """
    from elasticsearch import Elasticsearch
    es = Elasticsearch(
        hosts=['es-74udramb.public.tencentelasticsearch.com'],
        http_auth=('elastic', '123ABCabc'),
        scheme='https',
        # sniff_on_start=True,  # test the connection up front
        # sniff_on_connection_fail=True,  # refresh nodes on no response
        sniff_timeout=60,  # sniffing timeout in seconds
        ignore=[400, 405, 502]
    )
    query1 = {
        "size": size,
        "query": {
            "match": {
                "goodsId": goodsId
            }
        }
    }
    query = es.search(index=index_url, doc_type='_doc', scroll='5m', body=query1)
    # first page of hits
    results = query['hits']['hits']
    print(results)
    # total number of matching documents
    total = query['hits']['total']['value']
    print(total)
    # scroll cursor used to page through the remaining results
    scroll_id = query['_scroll_id']
    # Fix: the original divided by a hard-coded 100 regardless of *size*,
    # silently dropping documents whenever size < 100; page count must be
    # derived from the actual page size.
    for _ in range(0, int(total / size) + 1):
        query_scroll = es.scroll(scroll_id=scroll_id, scroll="5m")['hits']['hits']
        results += query_scroll
    for res in results:
        print("开始情感分析:" + res['_source']['content'])
        predict_sentiment(res['_source']['content'], res['_source']['guid'])


@app.route('/')
def hello_world():
    """Trivial index route; doubles as a liveness check."""
    return 'Hello World!'


@app.route('/setAnalysisMessage/<goodsid>')
def setAnalysisToMySQL(goodsid):
    """Analyse all comments for one product and upsert the totals to MySQL.

    Resets the global positive/negative counters, re-analyses every
    comment for *goodsid* pulled from Elasticsearch, then inserts or
    updates the counts in the ``message_analysis`` table.

    :param goodsid: product id taken from the URL (untrusted input)
    :return: "success!" on commit, "false!" if the DB write failed
    """
    from threading import current_thread
    # Fix: the original printed str(Thread.name) — a property object on the
    # class, not this thread's name — and passed goodsid as a second print
    # argument instead of formatting it into the message.
    print("线程:" + current_thread().name + "执行goodsId={}".format(goodsid))
    global sum_number1, sum_number2
    sum_number1 = 0
    sum_number2 = 0
    goodsId = goodsid
    Form_es_to_excel("user_message_list", 100, goodsId)
    # Persist the computed counters.
    db = pymysql.connect(host="47.98.248.3", port=3307, user="root", password="123456", db="star_goods")
    cur = db.cursor()
    try:
        now_datetime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        datetime_now_real = datetime.datetime.strptime(now_datetime, '%Y-%m-%d %H:%M:%S')
        # Security fix: goodsid comes straight from the URL, so all queries
        # use parameterized placeholders instead of string formatting.
        cur.execute("select count(*) from message_analysis where productId=%s", (goodsId,))
        existing = cur.fetchall()[0][0]
        print(existing)
        if existing > 0:
            cur.execute(
                "update message_analysis set pos_sum=%s,neg_sum=%s,get_real_time=%s where productId=%s",
                (sum_number1, sum_number2, datetime_now_real, goodsId))
        else:
            cur.execute(
                "insert into message_analysis (productId,pos_sum,neg_sum,get_real_time) values(%s,%s,%s,%s)",
                (goodsId, sum_number1, sum_number2, datetime_now_real))
        db.commit()
        # Dump the table contents for debugging.
        cur.execute("select * from message_analysis")
        print("--------------当前数据库数据----------------")
        print(cur.fetchall())
        print("------------------------------------------")
    except Exception:
        traceback.print_exc()
        print('插入失败，回滚~')
        db.rollback()
        return "false!"
    finally:
        # Fix: the original leaked the cursor and connection whenever the
        # except branch returned early.
        cur.close()
        db.close()
    return "success!"


@app.route('/train')
def train():
    """Retrain the sentiment model from the accumulated pos/neg text files.

    Reads every file under ``MessageFile/pos`` and ``MessageFile/neg``,
    tokenizes/indexes/pads them, rebuilds the bi-LSTM classifier and
    trains it with early stopping, checkpointing the best weights to
    ``sentiment_checkpoint.keras``.  Afterwards every distinct goodsId
    found in Elasticsearch is re-analysed on the thread pool.

    :return: "success", or "error" if any step failed
    """
    try:
        import os
        num1 = 0
        num2 = 0
        pos_txts = os.listdir('./MessageFile/pos')
        neg_txts = os.listdir('./MessageFile/neg')
        # All review texts, positives first then negatives — this order is
        # what the target vector below relies on.
        train_texts_orig = []
        for name in pos_txts:
            with open('./MessageFile/pos/' + name, 'r', errors='ignore') as f:
                num1 += 1
                train_texts_orig.append(f.read().strip())
        for name in neg_txts:
            with open('./MessageFile/neg/' + name, 'r', errors='ignore') as f:
                num2 += 1
                train_texts_orig.append(f.read().strip())
        # Segment each text and map words to pretrained embedding indices.
        train_tokens = []
        for text in train_texts_orig:
            # strip punctuation
            text = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——！，。？、~@#￥%……&（）]+", "", text)
            cut_list = list(jieba.cut(text))
            for i, word in enumerate(cut_list):
                try:
                    cut_list[i] = cn_model.vocab[word].index
                except KeyError:
                    # out-of-vocabulary words map to 0
                    cut_list[i] = 0
            train_tokens.append(cut_list)
        # Pad/truncate to mean + 2*std of the token-length distribution.
        num_tokens = np.array([len(tokens) for tokens in train_tokens])
        max_tokens = int(np.mean(num_tokens) + 2 * np.std(num_tokens))
        print("选择max_tokens=>" + str(max_tokens))
        num_words = 50000
        # Embedding matrix (num_words x embedding_dim): top num_words
        # pretrained vectors, index-aligned with cn_model.
        embedding_matrix = np.zeros((num_words, embedding_dim))
        for i in range(num_words):
            embedding_matrix[i, :] = cn_model[cn_model.index2word[i]]
        embedding_matrix = embedding_matrix.astype('float32')
        train_pad = pad_sequences(train_tokens, maxlen=max_tokens,
                                  padding='pre', truncating='pre')
        # words outside the top-50000 vocabulary are replaced with 0
        train_pad[train_pad >= num_words] = 0
        # targets: 1 for positive, 0 for negative (same order as the files)
        train_target = np.concatenate((np.ones(num1), np.zeros(num2)))
        print("-----------------")
        print(train_pad.shape)
        print("-----------------")
        from sklearn.model_selection import train_test_split

        X_train, X_test, y_train, y_test = train_test_split(train_pad,
                                                            train_target,
                                                            test_size=0.1,
                                                            random_state=12)
        # Build the bi-LSTM classifier (same topology as the serving model).
        model = Sequential()
        model.add(Embedding(num_words, embedding_dim, weights=[embedding_matrix],
                            input_length=max_tokens, trainable=False))
        model.add(Bidirectional(LSTM(units=32, return_sequences=True)))
        model.add(LSTM(units=16, return_sequences=False))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(lr=1e-3),
                      metrics=['accuracy'])
        # Checkpoint only the best weights (by validation loss).
        path_checkpoint = 'sentiment_checkpoint.keras'
        checkpoint = ModelCheckpoint(filepath=path_checkpoint, monitor='val_loss',
                                     verbose=1, save_weights_only=True,
                                     save_best_only=True)
        # Warm-start from an existing checkpoint if one is present.
        try:
            model.load_weights(path_checkpoint)
        except Exception as e:
            print(e)
        # Stop after 3 epochs without validation-loss improvement.
        early_stopping = EarlyStopping(monitor='val_loss', patience=3, verbose=1)
        # Automatically lower the learning rate on plateau.
        lr_reduction = ReduceLROnPlateau(monitor='val_loss',
                                         factor=0.1, min_lr=1e-5, patience=0,
                                         verbose=1)
        model.fit(X_train, y_train,
                  validation_split=0.1,
                  epochs=20,
                  batch_size=128,
                  callbacks=[early_stopping, checkpoint, lr_reduction])
        result = model.evaluate(X_test, y_test)
        print('Accuracy:{0:.2%}'.format(result[1]))

        # Collect every distinct goodsId currently stored in Elasticsearch.
        from elasticsearch import Elasticsearch
        es = Elasticsearch(
            hosts=['es-74udramb.public.tencentelasticsearch.com'],
            http_auth=('elastic', '123ABCabc'),
            scheme='https',
            sniff_timeout=60,  # sniffing timeout in seconds
            ignore=[400, 405, 502]
        )
        idSet = []
        page_size = 1000
        for page in range(100000):
            try:
                query1 = {
                    "query": {
                        "match_all": {
                        }
                    },
                    "track_total_hits": True,
                    "size": page_size,
                    # Fix: the original used "from": page, advancing one
                    # document at a time and re-reading overlapping windows.
                    "from": page * page_size
                }
                query = es.search(index="user_message_list", doc_type='_doc', scroll='5m', body=query1)
                results = query['hits']['hits']
                if not results:
                    break
                print(results)
                for result in results:
                    if result['_source']['goodsId'] not in idSet:
                        idSet.append(result['_source']['goodsId'])
            except Exception:
                # Best effort: stop paging once ES refuses the window.
                break
        idSet = list(set(idSet))
        print(idSet)
        for goods_id in idSet:
            print("开始分析goodsId={" + goods_id + "}的评论")
            # NOTE(review): the submitted workers mutate the shared
            # sum_number1/sum_number2 globals concurrently — counts from
            # overlapping jobs may interleave; confirm this is acceptable.
            executor.submit(setAnalysisToMySQL, goods_id)
    except Exception:
        # Fix: the original bare except hid every failure; log it first.
        traceback.print_exc()
        return "error"
    return "success"


def Form_es_to_excel(index_url, size, goodsId):
    """Fetch every comment for *goodsId* from Elasticsearch and run
    sentiment analysis on each one.

    NOTE(review): duplicate of an earlier identically-named function;
    being defined later, this copy is the one actually called at runtime.
    The earlier copy should be removed.

    :param index_url: ES index name to search
    :param size: page size for the initial/scroll query
    :param goodsId: product id matched against the ``goodsId`` field
    """
    from elasticsearch import Elasticsearch
    es = Elasticsearch(
        hosts=['es-74udramb.public.tencentelasticsearch.com'],
        http_auth=('elastic', '123ABCabc'),
        scheme='https',
        # sniff_on_start=True,  # test the connection up front
        # sniff_on_connection_fail=True,  # refresh nodes on no response
        sniff_timeout=60,  # sniffing timeout in seconds
        ignore=[400, 405, 502]
    )
    query1 = {
        "size": size,
        "query": {
            "match": {
                "goodsId": goodsId
            }
        }
    }
    query = es.search(index=index_url, doc_type='_doc', scroll='5m', body=query1)
    # first page of hits
    results = query['hits']['hits']
    print(results)
    # total number of matching documents
    total = query['hits']['total']['value']
    print(total)
    # scroll cursor used to page through the remaining results
    scroll_id = query['_scroll_id']
    # Fix: the original divided by a hard-coded 100 regardless of *size*,
    # silently dropping documents whenever size < 100; page count must be
    # derived from the actual page size.
    for _ in range(0, int(total / size) + 1):
        query_scroll = es.scroll(scroll_id=scroll_id, scroll="5m")['hits']['hits']
        results += query_scroll
    for res in results:
        print("----开始:" + res['_source']['content'])
        predict_sentiment(res['_source']['content'], res['_source']['guid'])


# Reverse indexing: turn token indices back into readable text.
def reverse_tokens(tokens):
    """Convert a sequence of word indices back into a readable string.

    Index 0 (padding / out-of-vocabulary marker) is skipped.

    :param tokens: iterable of integer word indices
    :return: concatenation of the words for all non-zero indices
    """
    # Idiom fix: join instead of quadratic string concatenation; the
    # original's else branch appended '' and was a no-op.
    return ''.join(cn_model.index2word[i] for i in tokens if i != 0)


if __name__ == '__main__':
    # Start the Flask server, listening on all interfaces.
    app.run(host='0.0.0.0', port=9218)
    # train()
