import numpy as np
import sys
import cv2
from collections import  Counter
import tritonclient.grpc as grpcclient
from tritonclient import utils
from tritonclient.utils import InferenceServerException
import tritonclient.utils.cuda_shared_memory as cudashm

from client_tools.processing import preprocess_foam,url_to_image


import time
import multiprocessing
from multiprocessing import Pool
import os
from ctypes import *
from PIL import Image

class unetclient():
    """Triton gRPC client for the 'unet' segmentation model.

    On construction, connects to the Triton server, verifies server/model
    health, and allocates + registers one CUDA shared-memory region each for
    the model input and output tensors.  `infer()` pushes a preprocessed
    image through the model and returns the fraction of output pixels whose
    sigmoid score exceeds the confidence threshold.
    """

    # Default connection settings (instance values are set in __init__).
    url = '192.168.1.159:7001'
    model = 'unet'
    ssl = False
    root_certificates = None
    private_key = None
    certificate_chain = None
    client_timeout = None
    model_info = False   # when True, fetch per-request inference statistics
    verbose = False

    # Model tensor geometry: 1 x 3 x 360 x 640 FP32.
    INPUT_H = 360
    INPUT_W = 640
    OUTPUT_SIZE = 640*360
    CONF_THRESH = 0.5

    def __init__(self, url='192.168.1.159:7001', model='unet', gpu_id=1, co_no=1):
        """Connect to Triton and register CUDA shared-memory regions.

        Args:
            url:    host:port of the Triton gRPC endpoint.
            model:  model name registered on the server.
            gpu_id: CUDA device that backs the shared-memory regions.
            co_no:  client instance number; makes the region names unique.

        Exits the process via sys.exit(1) on any connection / health /
        registration failure (original command-line-tool behaviour, kept
        for interface compatibility).
        """
        self.cuda_shm_used = 0
        self.url = url
        self.model = model
        # 1 batch * 3 channels * 360 * 640 pixels * 4 bytes (FP32).
        self.input_byte_size = 1 * 3 * 360 * 640 * 4
        self.output_byte_size = self.input_byte_size
        self.shm_ip0_handle = 0
        self.shm_op0_handle = 0
        self.gpu_id = gpu_id
        self.co_no = co_no
        # Region names must be unique per (client instance, GPU) pair so that
        # concurrent clients do not clobber each other's regions.
        self.input_data_name = '_'.join(
            ["foam_1_input0_data_no", str(self.co_no), "_gpu", str(self.gpu_id)])
        self.output_data_name = '_'.join(
            ["foam_1_output0_data_no", str(self.co_no), "_gpu", str(self.gpu_id)])

        # Create server context.
        try:
            triton_client = grpcclient.InferenceServerClient(
                url=self.url,
                verbose=self.verbose,
                ssl=self.ssl,
                root_certificates=self.root_certificates,
                private_key=self.private_key,
                certificate_chain=self.certificate_chain)
        except Exception as e:
            print("context creation failed: " + str(e))
            sys.exit(1)

        # Health checks: server process, server readiness, then this model.
        if not triton_client.is_server_live():
            print("FAILED : is_server_live")
            sys.exit(1)
        if not triton_client.is_server_ready():
            print("FAILED : is_server_ready")
            sys.exit(1)
        if not triton_client.is_model_ready(self.model):
            print("FAILED : is_model_ready", self.model)
            sys.exit(1)

        # Metadata fetch is a sanity check only; the result is not used.
        try:
            triton_client.get_model_metadata(self.model)
        except InferenceServerException as ex:
            print("FAILED : get_model_metadata")
            if "Request for unknown model" not in ex.message():
                print("Got: {}".format(ex.message()))
            sys.exit(1)

        # Model configuration: verify the server-side config matches the name.
        try:
            config = triton_client.get_model_config(self.model)
            if config.config.name != self.model:
                print("FAILED: get_model_config")
                sys.exit(1)
        except InferenceServerException as ex:
            print("FAILED : get_model_config")
            print("Got: {}".format(ex.message()))
            sys.exit(1)

        # If either region name is already registered on the server, another
        # client with the same (co_no, gpu_id) is running -- refuse to start.
        # Fix: was a bare `except:`; the status call raises
        # InferenceServerException for an unregistered region, so catch only
        # that and let unexpected errors propagate.
        try:
            print("input_data", triton_client.get_cuda_shared_memory_status(self.input_data_name))
            print('output_data_name', triton_client.get_cuda_shared_memory_status(self.output_data_name))
            print(triton_client.get_cuda_shared_memory_status())
            print("memory has been used")
            self.cuda_shm_used = 1
        except InferenceServerException:
            print("cuda_shm 未占用")

        if self.cuda_shm_used == 1:
            sys.exit(1)

        # Allocate the CUDA shared-memory regions on this client...
        self.shm_ip0_handle = cudashm.create_shared_memory_region(
            self.input_data_name, self.input_byte_size, self.gpu_id)
        self.shm_op0_handle = cudashm.create_shared_memory_region(
            self.output_data_name, self.output_byte_size, self.gpu_id)

        # ...and register both (output then input, as before) with the server.
        triton_client.register_cuda_shared_memory(
            self.output_data_name, cudashm.get_raw_handle(self.shm_op0_handle),
            self.gpu_id, self.output_byte_size)
        triton_client.register_cuda_shared_memory(
            self.input_data_name, cudashm.get_raw_handle(self.shm_ip0_handle),
            self.gpu_id, self.input_byte_size)

        self.triton_client = triton_client

    def _load_image(self, img_type, input_img):
        """Load the input image: img_type 0 = local path, 1 = URL.

        Exits the process on a missing/invalid source or an unreadable image.
        """
        if not input_img:
            print("FAILED: no input image")
            sys.exit(1)
        if img_type == 0:
            image = cv2.imread(input_img)
        elif img_type == 1:
            image = url_to_image(input_img)
        else:
            print("FAILED: img_type is wrong")
            sys.exit(1)
        if image is None:
            print(f"FAILED: could not load input image {str(input_img)}")
            sys.exit(1)
        return image

    def infer(self, img_type, input_img, confidence=0.5, test=0):
        """Run segmentation on one image; return the foreground pixel ratio.

        Args:
            img_type:   0 = local file path, 1 = URL.
            input_img:  path or URL of the image.
            confidence: sigmoid threshold for a pixel to count as foreground.
                        (Bug fix: previously accepted but ignored -- 0.5 was
                        hard-coded -- so the default behaviour is unchanged.)
            test:       unused; kept for interface compatibility.

        Returns:
            float in [0, 1]: fraction of output pixels above `confidence`.
        """
        input_image = self._load_image(img_type, input_img)

        # Preprocess to the model's 360x640 layout and add the batch axis.
        input_image_buffer = preprocess_foam(input_image, 360, 640)
        batched_input = np.expand_dims(input_image_buffer, axis=0)

        # Copy the input tensor into the CUDA shared-memory region.
        cudashm.set_shared_memory_region(self.shm_ip0_handle, [batched_input])

        inputs = [grpcclient.InferInput('data', [1, 3, 360, 640], "FP32")]
        outputs = [grpcclient.InferRequestedOutput('prob')]
        # Both tensors travel via shared memory instead of the gRPC payload.
        inputs[-1].set_shared_memory(self.input_data_name, self.input_byte_size)
        outputs[-1].set_shared_memory(self.output_data_name, self.output_byte_size)

        results = self.triton_client.infer(model_name=self.model,
                                           inputs=inputs,
                                           outputs=outputs,
                                           client_timeout=self.client_timeout)

        if self.model_info:
            # Fix: this block was duplicated verbatim, issuing the
            # statistics RPC twice; a dead timing assignment was also removed.
            statistics = self.triton_client.get_inference_statistics(model_name=self.model)
            if len(statistics.model_stats) != 1:
                print("FAILED: get_inference_statistics")
                sys.exit(1)

        output0 = results.as_numpy('prob')
        if output0 is None:
            print("OUTPUT0 is missing in the response.")
            sys.exit(1)
        # For a shared-memory output, as_numpy() describes dtype/shape only;
        # the actual data is read back from the CUDA region.
        result = cudashm.get_contents_as_numpy(
            self.shm_op0_handle, output0.dtype, output0.shape)

        print(len(result[result > 0]), result.size)
        # Raw logits -> sigmoid probabilities -> binary foreground mask.
        res = 1 / (1 + np.exp(-result))
        res = np.squeeze(res)
        res = res > confidence
        res = (res * 255).astype(np.uint8)

        # NOTE(review): debug artifact -- overwrites ./1.jpg on every call;
        # kept for behaviour compatibility, consider removing or gating.
        image = Image.fromarray(res)
        image.save("1.jpg")

        print(len(res[res == 255]), res.size)
        return len(res[res == 255]) / res.size

    def __del__(self):
        """Unregister and free the CUDA shared-memory regions on teardown.

        Fix: guarded with getattr / handle checks so a partially-initialised
        instance (e.g. __init__ bailed out before registration) does not
        raise AttributeError from the finaliser.
        """
        if getattr(self, 'cuda_shm_used', 1) == 0:
            client = getattr(self, 'triton_client', None)
            if client is not None:
                client.unregister_cuda_shared_memory(self.input_data_name)
                client.unregister_cuda_shared_memory(self.output_data_name)
            # Handles are initialised to 0 and only truthy once created.
            if self.shm_ip0_handle:
                cudashm.destroy_shared_memory_region(self.shm_ip0_handle)
            if self.shm_op0_handle:
                cudashm.destroy_shared_memory_region(self.shm_op0_handle)
        else:
            print("系统异常结束")




