#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import joblib
import jieba
import pickle
from numpy import *
from sklearn.feature_extraction.text import TfidfVectorizer
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import load_model
from datetime import timedelta
from flask import Flask
from flask import render_template
from flask import request

# Predefined constants
MAX_SEQUENCE_LENGTH = 100    # maximum padded token-sequence length fed to the keras models

# Models are loaded once at import time so every request reuses them.
Cmodel = load_model('./model/cnn.h5')   # CNN classifier (used when model == "3")
Lmodel = load_model('./model/lstm.h5')  # LSTM classifier (used when model == "2")
clf = joblib.load('./model/clf.pkl')    # scikit-learn classifier for the TF-IDF path (model == "1")
stopWord_path = "./stop/stopword.txt"  # path of the stop-word list, one word per line

app = Flask(__name__)

def readFlie(path):
    """Read one sample file and return its full content as a single string.

    NOTE(review): the name is a typo for 'readFile' but is kept unchanged
    because other functions in this file call it by this name.

    :param path: path of the text file to read
    :return: the whole file content as one str
    """
    # utf-8 is given explicitly so files containing Chinese text decode the
    # same way on every platform (the previous code used the locale default);
    # errors='ignore' keeps the original skip-undecodable-bytes behaviour.
    with open(path, 'r', encoding='utf-8', errors='ignore') as file:
        # The redundant explicit close() was removed: 'with' closes the file.
        return file.read()

def readBunch(path):
    """Load a pickled Bunch (bag-of-words) object from *path* and return it."""
    with open(path, 'rb') as fh:
        return pickle.load(fh)

def getStopWord(inputFile):
    """Return the stop-word list: one entry per line of *inputFile*."""
    return readFlie(inputFile).splitlines()

# Matches every character that is NOT an ASCII letter, digit or CJK unified
# ideograph. Compiled once at import time instead of on every call.
_NON_WORD_RE = re.compile(r"[^a-zA-Z0-9\u4E00-\u9FA5]")

def remove_punctuation(line):
    """Strip every character that is not a letter, digit or Chinese character.

    :param line: arbitrary value; coerced to str first
    :return: the cleaned string, or '' when the input is blank/whitespace
    """
    line = str(line)
    if line.strip() == '':
        return ''
    return _NON_WORD_RE.sub('', line)

def num2Cate(num):
    """Map a predicted class index to its Chinese category label.

    :param num: class index produced by a model (0, 1 or 2)
    :return: the label string, or 'error' for any unknown index
    """
    labels = {
        0: "国家级",
        1: '省区级',
        2: '地区级',
    }
    return labels.get(num, 'error')

@app.route('/')
def showIndex():
    '''
        Front-end display page: renders the main index template.
    '''
    return  render_template('index.html')

@app.route('/api/predict',methods=["GET", "POST"])
def predict():
    """Classify a news title with one of three pre-loaded models.

    Request parameters (form fields for POST, query string for GET):
        title -- the text to classify
        model -- "1" = TF-IDF + scikit-learn clf, "2" = LSTM, "3" = CNN

    :return: the predicted category label as a string, or 'error' when the
             title is missing or the model id is unknown
    """
    if request.method == "POST":
        title = request.form.get("title")
        model = request.form.get("model")
    else:
        title = request.args.get("title")
        model = request.args.get("model")

    if not title:
        # BUG FIX: the previous 'return False' is not a valid Flask response
        # (Flask raises TypeError, producing a 500); return an explicit
        # error string instead, matching num2Cate's 'error' fallback.
        return 'error'

    if model == "1":
        # TF-IDF path: segment the title, vectorize with the TRAINING
        # vocabulary so the feature columns line up with the saved clf.
        title = str(title).replace("\r\n", "")  # drop CRLFs (was '"".strip()', a no-op on '')
        cutResult = [" ".join(jieba.cut(title))]
        trainSet = readBunch("./dat_list/tfidfspace.dat")  # training-set bag of words
        vectorizer = TfidfVectorizer(stop_words=getStopWord(stopWord_path), sublinear_tf=True, max_df=0.5,
                                     vocabulary=trainSet.vocabulary)  # reuse the training vocabulary
        testSpace_tdm = vectorizer.fit_transform(cutResult)  # TF-IDF sparse matrix
        return clf.predict(testSpace_tdm)[0]

    # Deep-learning path (LSTM / CNN): clean, segment, drop stop words.
    stopwords = getStopWord(stopWord_path)
    tokenizer = Tokenizer()
    txt = remove_punctuation(title)
    txt = [" ".join([w for w in list(jieba.cut(txt)) if w not in stopwords])]
    # NOTE(review): fitting a fresh Tokenizer on the single input text means
    # its word index cannot match the one used at training time, which very
    # likely degrades predictions — the training tokenizer should probably be
    # pickled and loaded here instead. Kept as-is pending confirmation.
    tokenizer.fit_on_texts(txt)
    seq = tokenizer.texts_to_sequences(txt)
    padded = pad_sequences(seq, maxlen=MAX_SEQUENCE_LENGTH)
    if model == "2":
        pred = Lmodel.predict(padded)
    elif model == "3":
        pred = Cmodel.predict(padded)
    else:
        # BUG FIX: previously 'pred' stayed unbound for any other model id,
        # raising NameError (a 500); fail explicitly instead.
        return 'error'
    cate = pred.argmax(axis=1)[0]
    return num2Cate(cate)

if __name__ == '__main__':
    # Development-server settings: reload templates on change and keep the
    # static-file cache very short so edits show up immediately.
    app.jinja_env.auto_reload = True
    # BUG FIX: the key was misspelled 'SEND_FILE_MAX_AGE_DEAFAULT', so the
    # 1-second static-file cache timeout was silently never applied.
    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = timedelta(seconds=1)
    app.config['TEMPLATES_AUTO_RELOAD'] = True
    app.run(host='127.0.0.1', port=5000, debug=True)




