import grpc
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import tensorflow as tf
import requests
from tensorflow.python.framework.tensor_util import make_tensor_proto
from tensorboard.util.tensor_util import make_ndarray


class TfServingEng(object):

    """
    TensorFlow Serving client for a single model, supporting both the gRPC
    ``Predict`` API (default) and the REST ``:predict`` endpoint. Calls are
    synchronous.

    NOTE(review): the original docstring claimed input node "input", output
    node "out" and signature_name "model", but the code hard-codes the
    signature "serving_default" and forwards arbitrary input keys — confirm
    which is the real model contract.
    """

    def __init__(self, tfserving_dict, timeout=10, use_grpc=True, **kwargs):
        """
        Args:
            tfserving_dict: dict with keys 'model_name', 'host',
                'http_port' and 'grpc_port'.
            timeout: request timeout in seconds, used for both transports.
            use_grpc: True -> gRPC Predict; False -> REST :predict.
            **kwargs: accepted and ignored (kept for backward compatibility).
        """
        self.model_name = tfserving_dict['model_name']
        self.timeout = timeout
        self.server_ip = tfserving_dict['host']
        self.server_http_port = tfserving_dict['http_port']
        self.server_grpc_port = tfserving_dict['grpc_port']

        # Channel/stub are created unconditionally (even for REST use) so that
        # existing callers toggling `use_grpc` after construction keep working.
        self.grpc_channel = grpc.insecure_channel("%s:%s" % (self.server_ip, self.server_grpc_port))
        self.stub = prediction_service_pb2_grpc.PredictionServiceStub(self.grpc_channel)

        self.use_grpc = use_grpc


    def _create_message(self, tensor_data_json):
        """Build the transport-specific request from {'inputs': {name: array}}.

        gRPC: returns a PredictRequest with each input converted to a float32
        TensorProto. REST: returns a plain dict of input name -> nested lists.
        Values are assumed numpy-like (must support .tolist()).
        """
        inputs = tensor_data_json['inputs']

        if not self.use_grpc:
            return {key: value.tolist() for key, value in inputs.items()}

        request_message = predict_pb2.PredictRequest()
        request_message.model_spec.name = self.model_name
        request_message.model_spec.signature_name = "serving_default"
        for key, tensor_data in inputs.items():
            # dtype is forced to float32 to match the exported model's input
            # signature (presumably a Keras float model — confirm if other
            # dtypes are ever needed).
            request_message.inputs[key].CopyFrom(
                make_tensor_proto(tensor_data.tolist(), dtype=tf.float32))

        return request_message


    @property
    def http_request_url(self):
        """REST predict endpoint, e.g. http://host:port/v1/models/name:predict."""
        return 'http://{self.server_ip}:{self.server_http_port}/v1/models/{self.model_name}:predict'.format(self=self)


    def predict_result(self, tensor_data_json):
        """Send a prepared request (see _create_message) and return the outputs.

        Returns:
            dict of {output_name: nested lists}; if the model has exactly one
            output, the bare value is returned instead of the wrapping dict.
        """
        if self.use_grpc:
            # Second positional argument of a gRPC stub call is the timeout
            # in seconds.
            result = self.stub.Predict(tensor_data_json, self.timeout)
            results = {key: make_ndarray(result.outputs[key]).tolist()
                       for key in result.outputs}
        else:
            r = requests.post(self.http_request_url,
                              json={"inputs": tensor_data_json},
                              timeout=self.timeout)
            # NOTE(review): a non-2xx response surfaces here as a KeyError on
            # 'outputs'; consider r.raise_for_status() if callers should see
            # HTTP errors explicitly.
            results = r.json()['outputs']

        # Unwrap the single-output case for caller convenience.
        if isinstance(results, dict) and len(results) == 1:
            results = next(iter(results.values()))

        return results


    def predict(self, tensor_data_json):

        """
        Run inference: build the request for the active transport, send it,
        and return the model outputs (see predict_result for the shape).
        """
        request_message = self._create_message(tensor_data_json)
        res = self.predict_result(request_message)

        return res