# coding:utf-8

import pickle
import thulac
import re
from gensim.models import KeyedVectors
import numpy as np
import tensorflow as tf
import tensorflow.contrib.keras as kr
from chinese2digit import chinese2digits,changeChineseNumToArab
from cnn_model import TCNNConfig, TextCNN
from data_helper import replace_stop_info
from cnews_loader import read_vocab,get_word_embedding

class Predict:
    """Predict legal (criminal-law) article numbers from Chinese text.

    Three strategies, tried in order by the public methods:
      * regex extraction when the text explicitly cites an article number;
      * a pickled (classifier, tfidf vectorizer) pair (default mode);
      * a TextCNN restored from a TensorFlow checkpoint (``is_network=True``).
    """

    def __init__(self, model_path, glove, is_w2v=False, is_network=False, fn=None, vocab_path=None, labels_path=None):
        """
        Args:
            model_path: TF checkpoint path (network mode) or pickle path.
            glove: pre-loaded word vectors (gensim KeyedVectors-like) when
                ``is_w2v`` is True.
                NOTE(review): network mode also reads the vectors through
                ``self.w2v``, so callers using ``is_network=True`` must pass
                loaded vectors AND ``is_w2v=True`` — otherwise the embedding
                lookup below operates on an empty list; confirm with callers.
            is_w2v: whether word vectors were supplied.
            is_network: restore the TextCNN model instead of loading a pickle.
            fn: regex for anonymized family names (network mode only).
            vocab_path: vocabulary file, one token per line (network mode).
            labels_path: label file, one integer label per line (network mode).
        """
        self.model_path = model_path
        self.thu = thulac.thulac(seg_only=True)  # Chinese word segmenter
        self.w2v = []
        self.is_w2v = is_w2v
        if self.is_w2v:
            self.w2v = glove

        if is_network and fn is not None and vocab_path is not None and labels_path is not None:
            self.model_config = TCNNConfig()
            self.model_config.is_w2v = True
            words, word_to_id = read_vocab(vocab_path)
            model = self.w2v
            # Build the embedding matrix: in-vocabulary words take their
            # pretrained vector, unknown words get a random uniform vector.
            w2v = []
            for word in words:
                if word in model.vocab:
                    w2v.append(model[word])
                else:
                    w2v.append(np.random.uniform(-1, 1, self.model_config.embedding_dim))
            w2v = np.asarray(w2v)
            self.model = TextCNN(self.model_config, w2v)
            tf_config = tf.ConfigProto()
            tf_config.gpu_options.allow_growth = True  # grab GPU memory on demand
            self.sess = tf.Session(config=tf_config)
            self.saver = tf.train.Saver(tf.global_variables())
            self.saver.restore(self.sess, save_path=self.model_path)
            self.fn = fn
            # Reuse the mapping read above instead of re-reading the vocab file.
            self.word_to_id = word_to_id
            with open(labels_path, encoding='utf-8') as f:
                self.id_to_label = {i: int(label.strip()) for i, label in enumerate(f)}
        else:
            # Classic mode: a pickled (classifier, tfidf vectorizer) pair.
            # Use a context manager so the file handle is always closed.
            with open(self.model_path, 'rb') as f:
                self.model, self.tfidf = pickle.load(f)

    def predict(self, content, embedding_type):
        """Predict the article number for ``content`` (non-network mode).

        Returns:
            (article_number, feature_vector, probability). When the text
            explicitly cites an article, the number is extracted by regex
            with probability 1.0 and an empty feature vector.
        """
        # Explicit citation such as "刑法第…条": extract the number directly.
        patten = re.compile('.*?刑法第.*?条', re.I)
        if re.match(patten, content):
            for r in re.finditer(patten, content):
                g = r.group()
                start = g.index('刑法第') + 3
                end = g.index('条', start)
                return int(changeChineseNumToArab(g[start:end])), [], 1.0

        # Citation of the form "（…第…条": same direct extraction.
        patten = re.compile('.*?（.*?第.*?条', re.I)
        if re.match(patten, content):
            for r in re.finditer(patten, content):
                g = r.group()
                return int(changeChineseNumToArab(g[g.index('第') + 1:g.index('条')])), [], 1.0

        # No explicit citation: segment and run the pickled classifier.
        seg_content = self.thu.cut(content, text=True)
        test_x = self.tfidf.transform([seg_content]).toarray()
        yred = int(self.model.predict(test_x)[0])
        prob = np.max(self.model.predict_proba(test_x))
        if self.is_w2v and embedding_type == 'glove':
            vectors = [self.w2v[word] for word in seg_content.split(' ')
                       if word in self.w2v.vocab]
            # Guard against texts with no in-vocabulary word:
            # np.average([]) would raise ZeroDivisionError.
            if vectors:
                test_x = np.average(vectors, axis=0)
            else:
                test_x = np.zeros(self.w2v.vector_size)
        return yred, test_x, prob

    # Predict with the neural-network model.
    def predict_by_net(self, content):
        """Run the TextCNN over ``content``.

        Returns:
            (labels, probabilities) — all labels, sorted by descending
            activation, with activations rounded to 2 decimals.
        """
        content = self.thu.cut(content, text=True)
        content = [content.split(' ')]
        feed_dict = {
            self.model.input_x: self.process_file(content, self.word_to_id),
            self.model.keep_prob: 1.0,  # no dropout at inference time
        }
        with self.sess.as_default():
            act_logits = self.sess.run(self.model.act_logits, feed_dict=feed_dict)[0]
            probs = [np.round(float(p), 2) for p in sorted(act_logits, reverse=True)]
            order = np.argsort(act_logits)[::-1]  # indices by descending activation
            results = [self.id_to_label[r] for r in order]
        return results, probs

    def process_file(self, contents, word_to_id, max_length=600):
        """Convert tokenized texts to fixed-length id sequences.

        Args:
            contents: list of token lists.
            word_to_id: vocabulary mapping; out-of-vocabulary tokens are dropped.
            max_length: target sequence length (post-padding / post-truncation).

        Returns:
            2-D array of shape (len(contents), max_length).
        """
        data_id = [[word_to_id[tok] for tok in tokens if tok in word_to_id]
                   for tokens in contents]
        # Pad/truncate to a fixed length with Keras' helper.
        return kr.preprocessing.sequence.pad_sequences(
            data_id, max_length, padding='post', truncating='post')


if __name__ == '__main__':

    # Paths to the checkpoint, word vectors, and supporting data files.
    base_dir = '/home/abc/pySpace/law_match'
    model_path = base_dir + '/checkpoints/textcnn_11231233/best_validation'
    glove_path = base_dir + '/datasets/w2v_xingshi.txt'
    familyname_path = base_dir + '/datasets/gamedatas/familyname.txt'
    vocab_path = base_dir + '/datasets/gamedatas/vocab.txt'
    labels_path = base_dir + '/datasets/gamedatas/labels.txt'

    # Build a regex matching anonymized party names (e.g. surname + "某甲"
    # or surname + "x") from the list of family names.
    with open(familyname_path, encoding='utf-8') as f:
        fns = "".join(line.strip() for line in f)
        fn = "[" + fns + "]" + "[1-9]*某+[甲乙丙丁午己庚辛壬癸]*" \
             + "|[" + fns + "]" + "x+[甲乙丙丁午己庚辛壬癸]*"

    # NOTE(review): network mode (is_network=True) indexes the word vectors
    # via Predict.__init__, but a file *path* string is passed here with
    # is_w2v=False, leaving self.w2v an empty list — this looks like it
    # would fail at the embedding lookup. Presumably KeyedVectors should be
    # loaded from glove_path and passed with is_w2v=True; confirm.
    predict = Predict(model_path, glove_path, False, True, fn,
                      vocab_path=vocab_path, labels_path=labels_path)

    content = '使用假药、劣药或不符合国家规定标准的卫生材料、医疗器械，足以严重危害人体健康的；'
    predict.predict_by_net(content)