#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2020/8/26 14:48
# @Author : way
# @Site :
# @Describe:
import warnings
warnings.filterwarnings("ignore")
import keras
import pandas as pd
import re
import numpy as np
import jieba
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# Keep only the 50,000 most frequent words in the tokenizer vocabulary
MAX_NB_WORDS = 50000
# Maximum length (in tokens) of each cut_review sequence after padding
MAX_SEQUENCE_LENGTH = 250
# Dimensionality of the Embedding layer
EMBEDDING_DIM = 100
from flask import Flask, request, render_template
from data import SourceData
from data_corp import CorpData
from data_job import JobData
import sqlite3

app = Flask(__name__)

'''
定义了3个网址，用同一套模板渲染
'''
# NOTE(review): the Chinese note above reads "defines 3 URLs, rendered with
# the same template" — only one route ('/') is visible in this chunk.

# First-request guard used by index(): the very first request only flips this
# to True; POST prediction is handled on subsequent requests.
signal= False
# Re-fit the tokenizer on the cleaned review corpus so texts_to_sequences()
# at predict time matches the vocabulary — presumably the same corpus the
# model was trained on; TODO confirm against the training script.
path = './static/words/testcsv.csv'
file = pd.read_csv(path, encoding="utf-8")
cleaned_df_arr = np.array(list(file["0"]))
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(cleaned_df_arr)
# Pre-trained Keras classification model, loaded once at startup.
my_model = keras.models.load_model("./static/my_model.h5")
def news(news_path="./static/database/news.db"):
    """Read headline rows from the NEWS SQLite table and filter them for display.

    Only rows whose title is 16–24 characters long and whose source name is
    shorter than 9 characters are kept — presumably so they fit the page
    layout; TODO confirm against the template.

    Args:
        news_path: Path to the SQLite database file. Defaults to the bundled
            database, so existing callers (``news()``) are unaffected.

    Returns:
        A list of three parallel lists: ``[titles, times, sources]``.
    """
    conn = sqlite3.connect(news_path)
    try:
        cursor = conn.execute("SELECT  ID, TITLE, SOURCE, TIME from NEWS")
        title = []
        time_t = []
        source = []
        for row in cursor:
            if 15 < len(row[1]) < 25 and len(row[2]) < 9:
                title.append(row[1])
                source.append(row[2])
                time_t.append(row[3])
    finally:
        # The original version never closed the connection, leaking one
        # handle per request; always release it here.
        conn.close()
    return [title, time_t, source]


def remove_punctuation(line):
    """Strip every character that is not ASCII alphanumeric or a CJK
    ideograph (U+4E00–U+9FA5) from *line*.

    The input is coerced to ``str`` first; a blank/whitespace-only input
    yields the empty string.
    """
    text = str(line)
    # Guard clause: nothing left to keep in a blank string.
    if not text.strip():
        return ''
    keep_only = re.compile(u"[^a-zA-Z0-9\u4E00-\u9FA5]")
    return keep_only.sub('', text)


def stopwordslist(filepath):
    """Load a stopword list from a UTF-8 text file, one word per line.

    Args:
        filepath: Path to the stopword file.

    Returns:
        List of stripped lines (words), in file order.
    """
    # Use a context manager so the file handle is always closed — the
    # original opened the file and never closed it.
    with open(filepath, 'r', encoding='utf-8') as f:
        return [line.strip() for line in f]


@app.route('/', methods=['POST', 'GET'])
def index():
    """Single page view.

    The very first request only flips the module-level ``signal`` flag and
    renders a placeholder.  After that, a POST with form field ``'nm'`` runs
    the submitted text through the sentiment model and renders the predicted
    label; any other request re-renders the placeholder.

    Returns:
        Rendered ``index0.html`` with the prediction (or a prompt string),
        the filtered news list, the submitted text, and the SourceData
        form/title context.
    """
    # Fresh template data instance per request.
    data = SourceData()
    global signal,tokenizer,my_model
    if(signal):
        if request.method == 'POST':
            print("post")
            # user = request.form['nm']  # (commented-out) fetch a single input field
            temp_list = request.form  # all form fields submitted by the page
            print(temp_list['nm'])
            stopwords = stopwordslist("./static/words/cn_stopwords.txt")
            # Mapping from model output index (0..3) to human-readable label.
            cat_id_df = pd.DataFrame([["自杀倾向", 0],
                                      ["青春期叛逆", 1],
                                      ["抑郁", 2],
                                      ["正常", 3],
                                      ],
                                     columns=["情感", "id"])
            def predict1(text):
                """Clean -> jieba-segment -> drop stopwords -> tokenize -> pad,
                then return the label for the model's argmax class.
                Presumably this mirrors the training-time preprocessing —
                TODO confirm against the training script."""
                txt = remove_punctuation(text)
                txt = [" ".join([w for w in list(jieba.cut(txt)) if w not in stopwords])]
                seq = tokenizer.texts_to_sequences(txt)
                padded = pad_sequences(seq, maxlen=MAX_SEQUENCE_LENGTH)

                pred = my_model.predict(padded)

                # argmax over class probabilities -> look up the label text.
                id = pred.argmax(axis=1)[0]
                print(cat_id_df[cat_id_df.id == id]['情感'].values[0])
                return cat_id_df[cat_id_df.id == id]['情感'].values[0]

            return render_template('index0.html',emotion=predict1(temp_list['nm']),news=news(),test_data=temp_list['nm'], form=data, title=data.title)
    # First request ever: mark the app as warmed up; prediction only happens
    # on later requests.
    signal=True

    return render_template('index0.html',emotion="请输入语句!",news=news(),test_data="请输入语句", form=data, title=data.title)




if __name__ == "__main__":
    # Development server, bound to localhost only; debug disabled so the
    # interactive debugger is not exposed.
    app.run(host='127.0.0.1', debug=False)

