from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
import grpc
import tensorflow as tf
import numpy as np
import sys
import threading

from PIL import Image

 # 设置分类名称
CLASSES = {
    'akiec':'日光性角化症或上皮内癌',
    'bcc': '基底细胞癌',
    'bkl': '良性角化病',
    'df': '皮肤纤维瘤',
    'mel': '黑色素瘤',
    'nv': '黑色素细胞痣',
    'vasc': '血管性皮肤病变',
    }

def request_server(img_resized, server_url):
    '''
    Send an inference request to a TensorFlow Serving instance over gRPC.

    :param img_resized: preprocessed image to run inference on,
        numpy array of shape (h, w, 3)
    :param server_url: host:port of the TensorFlow Serving gRPC endpoint,
        str, e.g. '0.0.0.0:8500'
    :return: flattened model output scores, 1-D numpy array of floats
    '''
    # Use the channel as a context manager so the connection is closed
    # even if Predict raises (the original code leaked the channel).
    with grpc.insecure_channel(server_url) as channel:
        stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
        request = predict_pb2.PredictRequest()
        # Model name: must match the model_name passed when starting the container.
        request.model_spec.name = "mobileV1"
        # Signature name recorded when the SavedModel was exported.
        request.model_spec.signature_name = "serving_default"
        # "input_1" is the input tensor name set at model export time.
        # A leading batch dimension of 1 is prepended to the image shape.
        request.inputs["input_1"].CopyFrom(
            tf.make_tensor_proto(img_resized, shape=[1, ] + list(img_resized.shape), dtype=tf.float32))
        response = stub.Predict(request, 5.0)  # 5 secs timeout
        # "dense" is the output tensor name set at model export time.
        # NOTE(review): a stale comment here previously said "fc2" — verify
        # the output name against the actual SavedModel signature.
        return np.asarray(response.outputs["dense"].float_val)


# Quick smoke test: read an image and send an inference request.


def pre_grpc(img_path, server_url='0.0.0.0:8500'):
    '''
    Load an image from disk, preprocess it, and query the serving endpoint.

    :param img_path: path to the image file to classify (absolute path recommended)
    :param server_url: host:port of the TensorFlow Serving gRPC endpoint;
        defaults to the local container setup ('0.0.0.0:8500')
    :return: model output scores as returned by request_server (numpy array)
    '''
    img = Image.open(img_path)
    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
    # under its current name (available since Pillow 2.7).
    img = img.resize((224, 224), Image.LANCZOS)
    arr = np.array(img).astype('float32')
    # NOTE(review): this maps pixel values 0..255 to roughly [-1.0, 0.14];
    # (x - 127.5) / 127.5 may have been intended — confirm against the
    # preprocessing used at training time before changing it.
    arr = (arr - 224.) / 224.
    # NOTE(review): a PNG with an alpha channel yields shape (224, 224, 4),
    # which would not match a (h, w, 3) model input — confirm callers pass
    # RGB images, or add .convert('RGB') here.
    return request_server(arr, server_url)