# coding:utf-8
'''
Prediction class.
@author: wangyi
'''
from configs import config
import os
import tensorflow as tf
import pandas as pd
from model import Model
from data_helper import process
import tensorflow.contrib.keras as kr
import numpy as np
class Predict(object):
    """Run inference with a restored model and export predictions to CSV."""

    def predict(self, test_x, model, sess):
        """Predict the label(s) for a single encoded sample.

        Args:
            test_x: 1-D array of word ids for one sample (padded to seq_length).
            model: a built Model exposing `input_x`, `keep_drop_prob` and
                `pred_prob` tensors.
            sess: an open tf.Session with the model's weights restored.

        Returns:
            A list with one label per row of the probability matrix.
        """
        # The model expects a batch dimension, so wrap the single sample.
        test_x = np.expand_dims(test_x, 0)
        feed_dict = {
            model.input_x: test_x,
            model.keep_drop_prob: 1.0,  # disable dropout at inference time
        }

        # Class probabilities; presumably shape (batch, num_class) — TODO confirm.
        pred_prob = sess.run(model.pred_prob, feed_dict=feed_dict)
        # Per-row class ids sorted by ascending probability.
        sorted_args = [np.argsort(prob) for prob in pred_prob]

        # Invert the label -> id mapping so ids can be turned back into labels.
        label_to_id = process.get_label_id(config['label_file_path'])
        id_to_label = {idx: label for label, idx in label_to_id.items()}

        result = []
        for probs, order in zip(pred_prob, sorted_args):
            # NOTE(review): when the top class is not confident (<= 0.5) this
            # falls back to the SECOND most likely class, which has an even
            # lower probability — confirm this is intentional.
            if probs[order[-1]] > 0.5:
                result.append(id_to_label[order[-1]])
            else:
                result.append(id_to_label[order[-2]])
        return result

    def get_test_data(self):
        """Load the test CSV and encode it as padded word-id sequences.

        Returns:
            A (num_samples, seq_length) array of word ids, post-padded and
            post-truncated to config['seq_length'].

        Raises:
            ValueError: if config['char_or_word'] is neither 'word' nor 'char'.
        """
        df = pd.read_csv(config['test_file_path'], encoding='utf-8')
        if config['char_or_word'] == 'word':
            contents = list(df['word_seg'])
        elif config['char_or_word'] == 'char':
            contents = list(df['article'])
        else:
            raise ValueError('please choose word or char for test data')

        # Vocabulary built from the training file; tokens outside it are dropped.
        word_to_id = process.create_word_label(config['train_file_path'], config['char_or_word'],
                                               config['vocab_file_path'], config['vocab_size'])
        x_id = [[word_to_id[token] for token in str(content).split(' ') if token in word_to_id]
                for content in contents]
        return kr.preprocessing.sequence.pad_sequences(x_id, config['seq_length'],
                                                       padding='post', truncating='post')

    def output_csv(self, result, outf):
        """Write predictions to `outf` as CSV with columns ``id`` and ``label``.

        Args:
            result: sequence of predicted labels, ordered by sample index.
            outf: output file path for the CSV (no index column is written).
        """
        df = pd.DataFrame(list(enumerate(result)), columns=['id', 'label'])
        df.to_csv(outf, encoding='utf-8', index=False)

if __name__ == '__main__':
    predict = Predict()
    # Load and encode the test data before building the graph.
    x_test = predict.get_test_data()
    result = []
    # Clear the default graph stack to avoid GPU memory buildup.
    tf.reset_default_graph()
    # Build the model inside a fresh graph.
    with tf.Graph().as_default():
        # NOTE(review): config['decay_rate'] is passed twice in a row here —
        # one of the two looks like it should be a different key (e.g. decay
        # steps); confirm against Model's constructor signature.
        model = Model(config['model_name'], config['embedding_size'], config['seq_length'], config['filter_sizes'],
                      config['filter_nums'], config['num_class'], config['vocab_size'],
                      config['inital_learning_rate'],config['decay_rate'], config['decay_rate'],0.0)
        # Create the session; allow_growth keeps TF from grabbing all GPU memory.
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        sess = tf.Session(config=tf_config)
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        # Restore trained weights from the checkpoint.
        saver.restore(sess, save_path=os.path.join(config['save_dir'], config['checkpoint_path']))
    # Predict one sample at a time and collect the labels in input order.
    for x in x_test:
        result.extend(predict.predict(x,model,sess))
    predict.output_csv(result,config['output_file_path'])
    print(result)
