#!/usr/bin/env python
#-*-coding:utf-8-*-



import tensorflow as tf
import pickle
import numpy as np
from tensorflow.contrib.crf import viterbi_decode
# Word-segmentation feature labels -> feature ids:
# B = begin of a multi-char word, M = middle, E = end, S = single-char word.
SEG_DICT = {"B": 0, "M": 1, "E": 2, "S": 3}
def decode(logits, trans, sequence_lengths, tag_num):
    """Viterbi-decode a batch of CRF emission scores into tag-id paths.

    Args:
        logits: batch of emission score matrices; logits[i] has shape
            (max_len, tag_num) and only the first sequence_lengths[i]
            rows are valid.
        trans: CRF transition matrix of shape (tag_num + 1, tag_num + 1);
            the extra row/column is the artificial "start" tag.
        sequence_lengths: true (unpadded) length of each sequence.
        tag_num: number of real tags (excluding the start tag).

    Returns:
        A list of tag-id paths. Each path begins with the artificial start
        tag, so callers should strip the first element.
    """
    viterbi_sequences = []
    small = -1000.0
    # Score row for the artificial start tag: every real tag is heavily
    # penalized, only the start tag itself scores 0.
    start = np.asarray([[small] * tag_num + [0]])
    for logit, length in zip(logits, sequence_lengths):
        score = logit[:length]
        # Append a "start tag" column (never chosen after t=0), then prepend
        # the start row so the path is forced to begin at the start tag.
        # Note: uses a fresh name instead of rebinding the `logits` parameter,
        # which the original code shadowed inside the loop.
        pad = small * np.ones([length, 1])
        padded = np.concatenate([score, pad], axis=1)
        padded = np.concatenate([start, padded], axis=0)
        viterbi_seq, _ = viterbi_decode(padded, trans)  # score unused
        viterbi_sequences.append(viterbi_seq)
    return viterbi_sequences


class Predictor(object):
    """Restores a trained TF1 CRF sequence-tagging model from a checkpoint
    and decodes NER tags for a single pre-segmented sentence.
    """
    def __init__(self, map_file, checkpoint_dir):
        # map_file is a pickle of (char_to_id, id_to_char, tag_to_id,
        # id_to_tag) produced at training time.
        # NOTE(review): pickle.load executes arbitrary code from untrusted
        # files — only load maps produced by your own training run.
        with open(map_file, "rb") as f:
            char_to_id, id_to_char, tag_to_id, id_to_tag = pickle.load(f)

        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        self.tag_num = len(tag_to_id)

        self.tf_config = tf_config
        self.char_to_id = char_to_id
        # Rebuilt by inverting tag_to_id; the pickled id_to_tag is ignored.
        self.id_to_tag = {v:k for k, v in tag_to_id.items()}
        self.id_to_char = id_to_char
        self.checkpoint_dir = checkpoint_dir

        self.graph = tf.Graph()
        self.checkpoint_file = tf.train.latest_checkpoint(self.checkpoint_dir)
        with self.graph.as_default():
            session_conf = tf.ConfigProto(
                allow_soft_placement = True,
                log_device_placement = False )
            self.sess = tf.Session(config=session_conf)

            with self.sess.as_default():
                # Rebuild the graph structure from the .meta file, then
                # restore the trained weights into this session.
                saver = tf.train.import_meta_graph("{}.meta".format(self.checkpoint_file))
                saver.restore(self.sess, self.checkpoint_file)
                # Look up input placeholders / output tensors by the op names
                # used at training time.
                # NOTE(review): "chatInputs" looks like a typo for
                # "charInputs", but it must match the name baked into the
                # trained graph — do not "fix" it here.
                self.char_inputs = self.graph.get_operation_by_name("chatInputs").outputs[0]
                self.seg_inputs = self.graph.get_operation_by_name("segs").outputs[0]
                self.dropout = self.graph.get_operation_by_name("dropout").outputs[0]
                # The CRF transition matrix is fixed at inference time, so
                # evaluate it once into a numpy array for viterbi decoding.
                self.trans = self.graph.get_operation_by_name("crf_loss/transitions").outputs[0].eval()
                self.logits = self.graph.get_operation_by_name("project/logits").outputs[0]
                self.pred = self.graph.get_operation_by_name("project/output/pred").outputs[0]


    def predict(self, words, segs):
        """Tag one sentence.

        Args:
            words: sequence of characters; unknown chars map to "<OOV>".
            segs: per-character segmentation labels, keys of SEG_DICT
                ("B"/"M"/"E"/"S").

        Returns:
            List of string tags, one per input character.
        """
        char_inputs = []
        seg_inputs = []
        for w, s in zip(words, segs):
            if w not in self.char_to_id:
                char_inputs.append(self.char_to_id["<OOV>"])
            else:
                char_inputs.append(self.char_to_id[w])
            # NOTE(review): SEG_DICT.get silently yields None for an unknown
            # seg label — verify callers only pass B/M/E/S.
            seg_inputs.append(SEG_DICT.get(s))

        # Batch of size 1: shape (1, sentence_length).
        input_x1 = np.array(char_inputs).reshape(1, len(char_inputs))
        seg_inputs = np.array(seg_inputs).reshape(1, len(seg_inputs))

        feed_dict = {
            self.char_inputs: input_x1,
            self.seg_inputs : seg_inputs,
            self.dropout: 1.0
        }

        # `pred` is fetched but unused; decoding is done from the raw logits
        # with the CRF transition matrix instead.
        logits, pred = self.sess.run([self.logits, self.pred], feed_dict=feed_dict)

        path = decode(logits, self.trans, [input_x1.shape[1]], self.tag_num)
        # Drop the artificial start tag that decode() prepends to each path.
        path = path[0][1:]
        tags = [self.id_to_tag[p] for p in path]
        return tags


if __name__ == '__main__':
    # Smoke-test run: restore the trained model and tag one sample sentence.
    ckpt_dir = "/home/wuzheng/Pyprojects/bsner/results/ner/ckpt"
    maps_pkl = "/home/wuzheng/Pyprojects/bsner/results/ner/maps.pkl"
    ner_model = Predictor(map_file=maps_pkl, checkpoint_dir=ckpt_dir)
    # One character per element, with the matching segmentation label below.
    sample_chars = ['刚', '刚', '在', '英', '国', '完', '成', '大', '婚', '的', '周', '杰', '伦', '，', '又', '有', '新', '消', '息', '传', '来', '。', '不', '过', '，', '不', '是', '粉', '丝', '们', '翘', '首', '以', '盼', '的', '“', '昆', '凌', '有', '喜', '”', '的', '信', '息', '，', '而', '是', '第', '四', '季', '《', '中', '国', '好', '声', '音', '》', '公', '布', '首', '位', '导', '师', '，', '周', '董', '确', '认', '加', '盟', '，', '成', '为', '“', '好', '声', '音', '”', '史', '上', '第', '八', '位', '导', '师', '。']
    sample_segs = ['B', 'E', 'S', 'B', 'E', 'B', 'E', 'B', 'E', 'S', 'B', 'M', 'E', 'S', 'S', 'S', 'S', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'S', 'S', 'S', 'B', 'E', 'S', 'B', 'E', 'S', 'S', 'S', 'S', 'B', 'E', 'B', 'E', 'S', 'S', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'S', 'S', 'B', 'E', 'S', 'B', 'E', 'S', 'B', 'E', 'S', 'S', 'B', 'E', 'S', 'B', 'E', 'B', 'E', 'B', 'E', 'S', 'B', 'E', 'S', 'S', 'B', 'E', 'S', 'S', 'S', 'B', 'E', 'S', 'B', 'E', 'S']
    print(ner_model.predict(sample_chars, sample_segs))


