# coding:utf-8
from flask import Flask,request,jsonify
from keras.models import load_model
import numpy as np
import tensorflow as tf
import tensorflow.keras as kr
from keras.preprocessing import sequence
import codecs
import re
# import tensorflow as tf
import jieba
import pickle
import keras.backend as K
import os
import json
import warnings

warnings.filterwarnings("ignore")
os.environ["TF_CPP_MIN_LOG_LEVEL"]='3'  # silence TensorFlow C++ log spam
# os.environ['CUDA_VISIBLE_DEVICES'] = '2'
# Model artifacts, resolved relative to the working directory (Windows-style paths).
tokenizerPath = r".\tokenizer.pkl"            # pickled Keras tokenizer (word-level pipeline)
labelMapPath = r".\labelmap.json"             # maps str(class_id) -> human-readable label
model_path = r".\dragon1_20191207_1752.h5"    # trained Keras model
maxLength = 980  # pad length for the word-level pipeline (textClassify)
# Load artifacts once at import time so every request reuses them.
# NOTE(review): pickle.load executes arbitrary code from the file — only
# deploy with a tokenizer.pkl you produced yourself.
with codecs.open(tokenizerPath,'rb') as tkFile:
    tkp = pickle.load(tkFile)
with codecs.open(labelMapPath, 'r', encoding='utf-8') as f:
    idToLabel = json.load(f)
model = load_model(model_path)
class WordCut(object):
    """Segment Chinese text with jieba, dropping stopwords and punctuation."""

    def __init__(self, stopwords_path=r"/data/soft/dbfind/txtseg/stopwords.txt"):
        """Load the stopword list from `stopwords_path` (one word per line, UTF-8).

        Fixes: the original left the file handle open (no `with`) and kept a
        list, making each membership test O(n); a set gives O(1) lookups.
        """
        with codecs.open(stopwords_path, 'r', encoding='utf-8') as f:
            self.stopwords = {line.strip() for line in f}

    def addUserDict(self, UserDict):
        """Register a user dictionary file with jieba's segmenter."""
        jieba.load_userdict(UserDict)

    def seg_sentence(self, sentence):
        """Return `sentence` as space-separated segmented words (trailing space kept).

        Punctuation/whitespace characters are stripped before segmentation;
        stopwords and tab tokens are dropped afterwards.
        """
        sentence = re.sub('[\s+\[\],\"\':、.，。“”；：（）()]', "", sentence)
        words = jieba.cut(sentence.strip())
        # str.join is linear; the original `outstr +=` loop was quadratic.
        return "".join(w + " " for w in words
                       if w not in self.stopwords and w != '\t')

# wc = WordCut()
def textcharClassify(seq):
    """Classify `seq` character-by-character and return its label string.

    Each character is mapped to its id in the cnews vocabulary (unknown
    characters are silently skipped), padded to length 600, fed to the
    module-level `model`, and the argmax class id is translated through
    `idToLabel`.

    NOTE(review): the vocabulary file is re-read on every call; consider
    loading it once at module level like the other artifacts.
    """
    with open(r'F:\Resources\kdata\cnews\cnews.vocab.txt', encoding='utf8') as file:
        vocabulary_list = [k.strip() for k in file.readlines()]
    # Close the file before the heavy work; only the vocab read needs it.
    word2id_dict = dict([(b, a) for a, b in enumerate(vocabulary_list)])
    content = [[word2id_dict[word] for word in seq if word in word2id_dict]]
    train_X = kr.preprocessing.sequence.pad_sequences(content, 600)
    if train_X.ndim < 2:
        # Bug fix: np.expand_dims requires an axis argument; the original
        # call raised TypeError. Add a leading batch dimension.
        train_X = np.expand_dims(train_X, axis=0)
    # K.learning_phase(1)
    predY = model.predict(train_X)
    print(idToLabel)
    print(np.max(predY[0]))
    print(str(np.argmax(predY[0])))
    r = idToLabel[str(np.argmax(predY[0]))]
    print(r)
    return r

def textClassify(seq):
    """Classify `seq` word-by-word (jieba segmentation) and return its label.

    Segments with the module-level `wc` WordCut instance, tokenizes with the
    pickled tokenizer `tkp`, pads to `maxLength`, predicts with `model`, and
    translates the argmax class id through `idToLabel`.

    NOTE(review): `wc` is never created — the `wc = WordCut()` line above is
    commented out, so calling this raises NameError. Re-enable it (and make
    sure the stopwords file exists) before routing requests here.
    """
    content = wc.seg_sentence(seq)
    contents = tkp.texts_to_sequences([content])
    contents = sequence.pad_sequences(contents,
                                      maxlen=maxLength,
                                      value=0,
                                      )
    if contents.ndim < 2:
        # Bug fix: np.expand_dims requires an axis argument; the original
        # call raised TypeError. Add a leading batch dimension.
        contents = np.expand_dims(contents, axis=0)
    predY = model.predict(contents)
    print(idToLabel)
    print(str(np.argmax(predY[0])))
    r = idToLabel[str(np.argmax(predY[0]))]
    print(r)
    return r

app = Flask(__name__)
# Capture the Keras backend session at load time; request handlers enter its
# graph explicitly (TF1-style sessions are not implicitly shared across the
# threads Flask serves requests on).
sess = K.get_session()

@app.route("/predict/",methods=['POST','GET'])
def predict():
    """HTTP endpoint: classify the `sentence` parameter, return JSON `{"result": ...}`.

    Fix: the route accepts POST, but the original read only `request.args`
    (the query string), silently ignoring POST form bodies. `request.values`
    combines query string and form data, so both methods now work.
    """
    sentence = request.values.get("sentence", "")
    # Run prediction inside the graph captured at module load; Flask serves
    # requests from worker threads that do not inherit the default graph.
    with sess.graph.as_default():
        # result = textClassify(sentence)
        result = textcharClassify(sentence)
    return jsonify(result=result)

if __name__ == "__main__":
    # Bug fix: the `if` body contained only a comment, which is a syntax
    # error — the module could not even be imported. Restore a runnable body.
    # app.run(debug=True)
    app.run()