import pickle
from keras.models import load_model
from data_transform import DataTransform
import numpy as np

 
class Predictor:
    """Predicts accusation, relevant law articles and imprisonment term
    from Chinese case-fact texts using three pre-trained CNN models."""

    def __init__(self, num_words=40000, max_len=400,
                 accusation_path='model/CNN_accusation.h5',
                 relevant_articles_path='model/CNN_relevant_articles.h5',
                 imprisonments_path='model/CNN_imprisonment.h5',
                 tokenizer_path='model/tokenizer_fact_40000.pkl'):
        # Vocabulary size and padded sequence length used when encoding facts;
        # must match the values the models were trained with.
        self.num_words = num_words
        self.max_len = max_len
        self.accusation_path = accusation_path
        self.relevant_articles_path = relevant_articles_path
        # Was previously dropped; stored for parity with the other model paths.
        self.imprisonments_path = imprisonments_path
        self.batch_size = 512
        self.content_transform = DataTransform()
        self.tokenizer_path = tokenizer_path
        # Lazily loaded tokenizer cache — the original reloaded the pickle
        # from disk on every predict()/predict_one() call.
        self._tokenizer_fact = None
        self.models = {
            'accusation': load_model(accusation_path),
            'relevant_articles': load_model(relevant_articles_path),
            'imprisonment': load_model(imprisonments_path)
        }

    def _get_tokenizer(self):
        """Load the fitted fact tokenizer once and cache it for reuse.

        NOTE(review): pickle.load is unsafe on untrusted input; this assumes
        tokenizer_path is a trusted local artifact — confirm deployment-side.
        """
        if self._tokenizer_fact is None:
            with open(self.tokenizer_path, mode='rb') as f:
                self._tokenizer_fact = pickle.load(f)
        return self._tokenizer_fact

    def _encode(self, data, transform, tokenizer_fact):
        """Word-segment the texts, map them to padded index sequences and
        return the result as an ndarray ready for model input."""
        # Word segmentation (keep words of length >= 2)
        content_cut = transform.cut_texts(texts=data, word_len=2)
        transform.text2seq(texts_cut=content_cut, tokenizer_fact=tokenizer_fact,
                           num_words=self.num_words, maxlen=self.max_len)
        return np.array(transform.fact_pad_seq)

    def _predict_all(self, data, transform, tokenizer_fact):
        """Shared pipeline for predict()/web_predict(): run all three models
        and concatenate the decoded outputs column-wise."""
        content_fact_pad_seq = self._encode(data, transform, tokenizer_fact)

        accusation = self.models['accusation'].predict(content_fact_pad_seq)
        relevant_articles = self.models['relevant_articles'].predict(content_fact_pad_seq)
        imprisonment = self.models['imprisonment'].predict(content_fact_pad_seq)
        # Decode one-hot model outputs to labels; imprisonment is numeric.
        accusation = transform.one_hot_to_str(accusation, 'accusation')
        relevant_articles = transform.one_hot_to_str(relevant_articles, 'relevant_articles')
        imprisonment = transform.imprisonment_transform(imprisonment)

        return np.concatenate([accusation, relevant_articles, imprisonment], axis=1)

    def predict(self, data):
        """Predict all three targets for a list of raw fact texts.

        Returns an array with one row per text and the decoded
        accusation / relevant-articles / imprisonment columns concatenated.
        """
        return self._predict_all(data, DataTransform(), self._get_tokenizer())

    def web_predict(self, data, transform, tokenizer_fact):
        """Same as predict() but with a caller-supplied transform and
        tokenizer, so a web service can construct them once per process."""
        return self._predict_all(data, transform, tokenizer_fact)

    def predict_one(self, data, mode):
        """Predict a single target.

        mode -- one of 'accusation', 'relevant_articles', 'imprisonment'
                (a KeyError is raised for any other value).
        """
        transform = DataTransform()
        content_fact_pad_seq = self._encode(data, transform, self._get_tokenizer())

        predict = self.models[mode].predict(content_fact_pad_seq)
        if mode == 'imprisonment':
            return transform.imprisonment_transform(predict)
        return transform.one_hot_to_str(predict, mode)


if __name__ == '__main__':
    # Sample indictment text (Chinese) used as a smoke test for the pipeline.
    content = ['孝昌县人民检察院指控：2014年1月4日，被告人邬某在孝昌县城区2路公交车上××被害人晏某'
               '白色VIVO手机一部。经鉴定，该手机价值为750元。针对上述指控，公诉机关当庭宣读了被告人'
               '供述、被害人陈述及证人证言；出示了发还物品清单、手机照片、辨认笔录及照片、价格鉴定意见'
               '书、释放证明书、抓获破案经过、户籍证明等证据材料。公诉机关认为被告人邬某的行为已构成××罪'
               '；同时，公诉机关还建议对被告人邬某在××以内量刑。']

    # Exercise the web-service entry point: build the transform and load the
    # tokenizer once, as a long-lived server process would.
    predictor = Predictor()
    transform = DataTransform()
    with open('model/tokenizer_fact_40000.pkl', mode='rb') as f:
        tokenizer_fact = pickle.load(f)
    print(predictor.web_predict(content, transform, tokenizer_fact))
