#coding:utf-8 
import os
import sys
import time
import tensorflow as tf  
from tensorflow import saved_model
import numpy as np
from utils.embedding.tokenization import FullTokenizer
from tensorflow.python.saved_model import tag_constants
from utils.embedding.setting import *

os.environ["CUDA_VISIBLE_DEVICES"] = "0"

class SentenceEmbedding():
    """Client around a BERT-style TF1 SavedModel that produces sentence embeddings.

    The exported model is loaded into a dedicated ``tf.Session`` and its
    input/output tensors are resolved from the default serving signature
    (inputs ``input_ids`` / ``input_mask`` / ``segment_ids``, output
    ``sentence_embedding``). All returned vectors are L2-normalized and
    rounded, so ``cos_sim`` on two results is just a dot product.
    """

    def __init__(self, model_dir=saved_model_path, vocab_file=vocab_file, max_seq_length=64, use_cpu=True):
        """Load the SavedModel, resolve signature tensors, and run one warm-up query.

        Args:
            model_dir: Directory containing the exported SavedModel.
            vocab_file: BERT vocabulary file for the WordPiece tokenizer.
            max_seq_length: Fixed sequence length (inputs are truncated/padded to it).
            use_cpu: When True, cap TF thread pools so CPU usage stays bounded.
        """
        self._model_dir = model_dir
        self._vocab_file = vocab_file
        self.max_seq_length = max_seq_length
        self.tokenizer = FullTokenizer(self._vocab_file, do_lower_case=True)

        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        config.log_device_placement = False
        config.gpu_options.per_process_gpu_memory_fraction = 0.8
        if use_cpu:
            # Limit thread pools on CPU so inference does not saturate the host.
            config.intra_op_parallelism_threads = 4
            config.inter_op_parallelism_threads = 4
            config.device_count['CPU'] = 4

        self._session = tf.Session(graph=tf.Graph(), config=config)
        # Load the SavedModel into this session's (fresh) graph.
        meta_graph_def = tf.saved_model.loader.load(self._session, [tag_constants.SERVING], self._model_dir)
        signature = meta_graph_def.signature_def
        signature_key = tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
        input_ids_name = signature[signature_key].inputs["input_ids"].name
        input_mask_name = signature[signature_key].inputs["input_mask"].name
        segment_ids_name = signature[signature_key].inputs["segment_ids"].name
        vec_name = signature[signature_key].outputs["sentence_embedding"].name

        # Input placeholders.
        self.input_ids = self._session.graph.get_tensor_by_name(input_ids_name)
        self.input_mask = self._session.graph.get_tensor_by_name(input_mask_name)
        self.segment_ids = self._session.graph.get_tensor_by_name(segment_ids_name)
        # Output tensor.
        self.embedding = self._session.graph.get_tensor_by_name(vec_name)

        # Warm-up call so the first real request does not pay graph-init cost.
        self.inference("炙手可热的伯纳德·阿尔诺短暂超过杰夫·贝索斯成为世界首富")

    def close(self):
        """Release the underlying TF session. Safe to call more than once."""
        if getattr(self, "_session", None) is not None:
            self._session.close()
            self._session = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()

    def inference(self, text):
        """Embed a single sentence.

        Args:
            text: Raw input string.

        Returns:
            The normalized embedding as a plain Python list of floats.
        """
        input_ids, input_mask, segment_ids = self.convert_single_example(text)
        assert len(input_ids) == len(input_mask) == len(segment_ids)
        _feed_dict = {
            self.input_ids: [input_ids],
            self.input_mask: [input_mask],
            self.segment_ids: [segment_ids],
        }
        vec = self._session.run(self.embedding, feed_dict=_feed_dict)
        return self.normalized(vec)[0].tolist()

    def batch_inference(self, texts):
        """Embed a batch of sentences in one session run.

        Args:
            texts: Iterable of raw input strings.

        Returns:
            List of normalized embeddings (one list of floats per input).
        """
        input_ids, input_mask, segment_ids = [], [], []
        for sA in texts:
            s_input_ids, s_input_mask, s_segment_ids = self.convert_single_example(sA)
            input_ids.append(s_input_ids)
            input_mask.append(s_input_mask)
            segment_ids.append(s_segment_ids)
        assert len(input_ids) == len(input_mask) == len(segment_ids)
        _feed_dict = {
            self.input_ids: input_ids,
            self.input_mask: input_mask,
            self.segment_ids: segment_ids,
        }
        vec = self._session.run(self.embedding, feed_dict=_feed_dict)
        return self.normalized(vec).tolist()

    def convert_single_example(self, text):
        """Tokenize one sentence into fixed-length BERT features.

        Truncates to ``max_seq_length - 2`` tokens to leave room for the
        [CLS]/[SEP] markers, then zero-pads all three feature lists.

        Returns:
            Tuple ``(input_ids, input_mask, segment_ids)``, each of length
            ``max_seq_length``. The mask has 1 for real tokens, 0 for padding.
        """
        text_tokens = self.tokenizer.tokenize(text)
        tokens = ["[CLS]"]
        segment_ids = [0]
        for token in text_tokens[:self.max_seq_length - 2]:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append("[SEP]")
        segment_ids.append(0)

        input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
        input_mask = [1] * len(input_ids)
        # Zero-pad up to the sequence length.
        while len(input_ids) < self.max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)
        assert len(input_ids) == self.max_seq_length
        assert len(input_mask) == self.max_seq_length
        assert len(segment_ids) == self.max_seq_length
        return input_ids, input_mask, segment_ids

    def cos_sim(self, a, b):
        """Cosine similarity of two vectors (NaN/inf if either has zero norm)."""
        dot_product = np.dot(a, b)
        norm_a = np.linalg.norm(a)
        norm_b = np.linalg.norm(b)
        return dot_product / (norm_a * norm_b)

    def normalized(self, raw_vec, round_num=8):
        """Row-wise L2-normalize a 2-D array of vectors.

        Fix vs. original: operates on a copy instead of mutating the caller's
        array in place, and drops the dead ``res > 0`` mask (``norm + 1e-8``
        is always strictly positive, so every row was divided anyway).

        Args:
            raw_vec: 2-D array-like of shape (batch, dim).
            round_num: Decimal places to round the normalized values to.

        Returns:
            A new rounded, normalized array; ``raw_vec`` is left untouched.
        """
        vec = np.array(raw_vec, copy=True)
        norms = np.linalg.norm(vec, axis=1) + 1e-8  # epsilon guards zero rows
        return np.round(vec / norms[:, np.newaxis], round_num)



if __name__ == "__main__":
    # Quick manual smoke test: embed one sentence and dump the vector.
    sample = "炙手可热的伯纳德·阿尔诺短暂超过杰夫·贝索斯成为世界首富"
    client = SentenceEmbedding(saved_model_path, vocab_file)
    print(client.inference(sample))

