import os
import re
import jieba
from numpy import *
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import load_model
from flask import Flask
from flask import render_template
from flask import request

# Pre-defined constants
MAX_SEQUENCE_LENGTH = 100    # maximum token-sequence length fed to the model (padding target)

model = load_model('lstm.h5')  # pre-trained LSTM classifier, loaded once at startup
stopWord_path = "./stop/stopword.txt"  # path to the stop-word list (one word per line)

app = Flask(__name__)

def readFlie(path):    # read one sample's content; by default one file holds one sample
    """Return the full text content of *path*.

    Reads as UTF-8 (the stop-word file is Chinese text) and silently drops
    undecodable bytes via ``errors='ignore'``, matching the original
    best-effort behaviour.  Note: the original relied on the platform default
    encoding, which breaks on Windows (gbk); utf-8 is pinned explicitly here.
    """
    # The `with` block closes the file automatically; the original's explicit
    # file.close() inside the block was redundant and has been removed.
    with open(path, 'r', encoding='utf-8', errors='ignore') as file:
        return file.read()

def getStopWord(inputFile):  # load the stop-word list, one word per line
    """Return the stop words stored in *inputFile* as a list of lines."""
    content = readFlie(inputFile)
    return content.splitlines()

def remove_punctuation(line):
    """Strip every character except ASCII letters, digits and CJK ideographs.

    *line* is coerced to ``str`` first; an all-whitespace input yields ''.
    """
    text = str(line)
    # Guard clause: nothing left to keep in a blank/whitespace-only string.
    if not text.strip():
        return ''
    keep_only = re.compile(u"[^a-zA-Z0-9\u4E00-\u9FA5]")
    return keep_only.sub('', text)

def num2Cate(num):
    """Map a class index to its human-readable category label.

    Accepts the index as a string ('0'..'2') as before, and now also as an
    int (e.g. the result of ``argmax``) — the original dict was keyed on
    strings only, so an integer index from the model fell through to 'error'.
    Unknown indices still return 'error'.
    """
    return {
        '0': "国家级",
        '1': '省区级',
        '2': '地区级',
    }.get(str(num), 'error')

@app.route('/')
def showIndex():
    """Serve the front-end index page."""
    return render_template('index.html')

@app.route('/api/predict',methods=["GET", "POST"])
def predict():
    """Predict the category label for a news title passed as ``title``.

    Accepts the title via form data (POST) or query string (GET).
    Returns the category name, or 'error' when no title was supplied —
    the original returned ``False``, which is not a valid Flask response
    and raised a TypeError at request time.
    """
    if request.method == "POST":
        title = request.form.get("title")
    else:
        title = request.args.get("title")

    if not title:  # no input supplied — abort the prediction
        return 'error'

    stopwords = getStopWord(stopWord_path)
    txt = remove_punctuation(title)
    # Segment with jieba and drop stop words; Tokenizer expects space-joined text.
    txt = [" ".join(w for w in jieba.cut(txt) if w not in stopwords)]
    # NOTE(review): fitting a fresh Tokenizer on this single input assigns word
    # indices that cannot match the vocabulary the LSTM was trained on, so the
    # model sees essentially arbitrary indices. The tokenizer fitted on the
    # training corpus should be saved alongside lstm.h5 and loaded here — confirm.
    tokenizer = Tokenizer()
    tokenizer.fit_on_texts(txt)
    seq = tokenizer.texts_to_sequences(txt)
    padded = pad_sequences(seq, maxlen=MAX_SEQUENCE_LENGTH)
    pred = model.predict(padded)
    # argmax yields a numpy array; the original returned it directly, which
    # Flask cannot serialise. Convert to a plain int and map to its label.
    cate = int(pred.argmax(axis=1)[0])
    return num2Cate(str(cate))

if __name__ == '__main__':
    # Development server only — debug=True enables the interactive debugger
    # and must not be used in production; bound to localhost.
    app.run(host='127.0.0.1', debug=True)