import asyncio
import contextlib
import io
import json
import subprocess
import time

import docker
import httpx
import numpy as np
import yaml

from tritonclient.http import InferenceServerClient
from tritonclient.http import InferInput, InferRequestedOutput
from tritonclient.utils import InferenceServerException

from server.utils import generate_request_id

# Load the inference configuration at import time.
# NOTE(review): the path is relative to the process CWD, not to this file —
# confirm callers always start the service from the expected directory.
with open("../config/inference.yaml", "r", encoding="utf-8") as _cfg_fh:
    config = yaml.safe_load(_cfg_fh)

# Model registry used by TritonInferenceClient to resolve servers.
inference_config = config["inference"]
models = inference_config["models"]

class TritonInferenceClient:
    """
    Client that runs inference against a Triton Inference Server.

    The target server is resolved lazily from the module-level ``models``
    configuration: either a remote ``triton_server`` entry, or a local
    Docker-hosted Triton server that is started on demand.

    Attributes:
        model_name: Name of the model to run inference on.
        input_name: Name of the model's input tensor.
        output_name: Name of the model's output tensor.
        client: The resolved ``InferenceServerClient`` (set lazily by
            ``run_inference``; ``None`` until then).
    """

    def __init__(self, model_name: str, input_name: str = "input0",
                 output_name: str = "output0"):
        """
        Initialize the client.

        Args:
            model_name: Model name; must match a ``name`` entry in the
                module-level ``models`` config list.
            input_name: Name of the model's input tensor. Defaults to
                "input0" -- TODO confirm against the deployed model config.
            output_name: Name of the model's output tensor. Defaults to
                "output0" -- TODO confirm against the deployed model config.
        """
        self.model_name = model_name
        # BUG FIX: run_inference() reads self.input_name/self.output_name,
        # but the original __init__ never set them (AttributeError). They
        # are now parameters with backward-compatible defaults.
        self.input_name = input_name
        self.output_name = output_name
        self.client = None  # resolved lazily by get_client()

    async def run_dockerServer(self, model_name, server_name, tag, repo_path, prot):
        """
        Pull a Triton image and start a server container, then wait
        (up to ~10 s) for the model to become ready.

        Image catalog: https://catalog.ngc.nvidia.com/orgs/nvidia/containers/tritonserver
        e.g. tag = 'nvcr.io/nvidia/tritonserver:23.09-py3'  # 6.4 GB

        Args:
            model_name: Model whose readiness is polled.
            server_name: Command run inside the container (e.g. ``tritonserver``).
            tag: Docker image tag to pull and run.
            repo_path: Host path of the model repository; mounted at /models.
            prot: Host port mapped to the container's HTTP port 8000.
                NOTE(review): ``prot`` is kept (likely a typo of ``port``)
                because it mirrors the config key used by get_client().
        """
        # BUG FIX: list-form argv instead of shell=True -- avoids shell
        # injection through config-supplied tag/repo_path values.
        subprocess.call(['docker', 'pull', tag])

        # Run the Triton server detached and capture the container ID.
        # NOTE(review): container_id is currently unused beyond this point;
        # kept so a future caller can stop/inspect the container.
        container_id = subprocess.check_output(
            ['docker', 'run', '-d', '--rm',
             '-v', f'{repo_path}:/models',
             '-p', f'{prot}:8000',
             tag, server_name, '--model-repository=/models'],
        ).decode('utf-8').strip()

        triton_client = InferenceServerClient(url=f'localhost:{prot}', verbose=False, ssl=False)

        # Poll until the model reports ready (at most 10 attempts, 1 s apart).
        for _ in range(10):
            with contextlib.suppress(Exception):
                # BUG FIX: explicit `if` instead of `assert` (asserts are
                # stripped under `python -O`, which broke the readiness wait).
                if triton_client.is_model_ready(model_name):
                    break
            # BUG FIX: asyncio.sleep instead of time.sleep so the wait does
            # not block the event loop inside this coroutine.
            await asyncio.sleep(1)

    async def get_client(self, model_name: str):
        """
        Resolve an ``InferenceServerClient`` for *model_name*.

        Looks the model up in the module-level ``models`` config. If the
        entry declares a remote ``triton_server``, connect to it; otherwise
        ensure a local Docker-hosted Triton server is running (starting one
        via ``run_dockerServer`` if needed) and connect on localhost.

        Args:
            model_name: Model to resolve a client for.

        Returns:
            A ready ``InferenceServerClient``.

        Raises:
            ValueError: If the model is not in the config, or never
                becomes ready. (The original silently returned ``None``.)
        """
        for model in models:
            if model["name"] != model_name:
                continue
            if "triton_server" in model:
                triton_server = model["triton_server"]
                # BUG FIX: the original f-string
                #   f'{triton_server["protocol"]://triton_server["url"]}'
                # was a SyntaxError; protocol and url are now joined
                # correctly as "<protocol>://<url>".
                url = f'{triton_server["protocol"]}://{triton_server["url"]}'
                triton_client = InferenceServerClient(url=url, verbose=False, ssl=False)
                # Verify the model is actually loaded on the remote server.
                if triton_client.is_model_ready(model_name):
                    return triton_client
                raise ValueError(f"model {model_name} is not ready on {url}")

            # No remote server configured: use a local Docker container.
            client = docker.from_env()
            try:
                container = client.containers.get(model["server_name"])
                if container.status == 'running':
                    triton_client = InferenceServerClient(
                        url=f'localhost:{model["prot"]}', verbose=False, ssl=False)
                    # Verify the model is loaded before handing the client out.
                    if triton_client.is_model_ready(model_name):
                        return triton_client
            except docker.errors.NotFound:
                # Container not found -- ask the operator to check Docker.
                print("请检查docker服务是否启动")

            # Container missing, stopped, or model not ready: start a server.
            await self.run_dockerServer(model_name, model["server_name"],
                                        model["tag"], model["repo_path"], model["prot"])
            triton_client = InferenceServerClient(
                url=f'localhost:{model["prot"]}', verbose=False, ssl=False)
            if triton_client.is_model_ready(model_name):
                return triton_client
            raise ValueError(f"model {model_name} failed to become ready")
        raise ValueError(f"unknown model: {model_name}")

    async def run_inference(self, inputs: dict) -> dict:
        """
        Run inference on the configured model with the given inputs.

        Args:
            inputs (dict): Mapping of input name -> numpy array; must
                contain ``self.input_name``.

        Returns:
            dict: Mapping of output name -> numpy array.

        Raises:
            ValueError: If the client cannot be resolved or inference fails.
        """
        try:
            # BUG FIX: get_client is a coroutine and was never awaited --
            # self.client used to be a coroutine object, not a client.
            self.client = await self.get_client(self.model_name)

            # BUG FIX: build proper tritonclient request objects. The
            # original constructed an unused JSON payload, called
            # InferRequestedOutput as a *client attribute* (AttributeError),
            # and passed raw numpy arrays where InferInput objects belong.
            array = inputs[self.input_name]
            infer_input = InferInput(self.input_name, list(array.shape), "FP32")
            infer_input.set_data_from_numpy(array.astype(np.float32))
            requested = [InferRequestedOutput(self.output_name)]

            async_req = self.client.async_infer(
                model_name=self.model_name,
                model_version="1",
                inputs=[infer_input],
                outputs=requested,
                request_id=generate_request_id(),
            )
            # BUG FIX: async_infer returns an InferAsyncRequest, which has
            # no raise_for_status()/json(); get_result() blocks until the
            # request completes and raises on server-side errors.
            result = async_req.get_result()
        except (httpx.HTTPError, InferenceServerException, KeyError) as e:
            raise ValueError(f"运行推理时出错: {e}") from e

        # as_numpy decodes the requested output tensor into a numpy array.
        return {self.output_name: result.as_numpy(self.output_name)}

    def _parse_response(self, outputs_json: list[dict]) -> dict:
        """
        Parse Triton JSON-format outputs into a dict of numpy arrays.

        Args:
            outputs_json (list[dict]): Triton output-tensor dicts, each
                with "name", "data" (flat list) and "shape" keys.

        Returns:
            dict: Mapping of output name -> float32 numpy array reshaped
            to the declared shape.
        """
        outputs = {}
        for output in outputs_json:
            data = np.array(output["data"], dtype=np.float32).reshape(output["shape"])
            outputs[output["name"]] = data
        return outputs

    async def close(self):
        """Close the underlying Triton client, if one was ever created."""
        # BUG FIX: InferenceServerClient is synchronous -- it exposes
        # close(), not aclose(); also guard against never having connected.
        if self.client is not None:
            self.client.close()
