#!/usr/bin/env python
# -*- coding: utf-8 -*-

import argparse
import json
import os
import time

import numpy as np
import torch
import tritonclient.http as httpclient
from tritonclient.utils import InferenceServerException

import cn_clip.clip as clip
from clip import load_from_name, available_models
from clip.utils import _MODELS, _MODEL_INFO, _download, available_models, create_model, image_transform

from PIL import Image
from tqdm import tqdm


def _infer(client, model_name, input_name, batch_data, datatype, output_name):
    """Send one inference request to Triton and return the named output.

    Args:
        client: an ``httpclient.InferenceServerClient`` connected to the server.
        model_name: Triton model to invoke (e.g. ``"cn_clip_img"``).
        input_name: name of the model's input tensor.
        batch_data: numpy array already cast to the dtype the model expects.
        datatype: Triton dtype string matching ``batch_data`` (``"FP32"``/``"INT64"``).
        output_name: name of the output tensor to extract from the response.

    Returns:
        The requested output as a ``torch.Tensor``.
    """
    infer_input = httpclient.InferInput(input_name, batch_data.shape, datatype)
    infer_input.set_data_from_numpy(batch_data)
    response = client.infer(model_name=model_name, inputs=[infer_input])
    return torch.from_numpy(response.as_numpy(output_name))


def main():
    """Demo: compute Chinese-CLIP image/text features via Triton and print their similarity."""
    # Initialize a single Triton client; both models are served at the same endpoint.
    try:
        triton_client = httpclient.InferenceServerClient(
            url="localhost:8000",
            verbose=False
        )
    except Exception as e:
        print(f"Failed to initialize Triton client: {e}")
        return

    # Check that both models are ready before sending any requests.
    for model_name in ("cn_clip_img", "cn_clip_txt"):
        if not triton_client.is_model_ready(model_name):
            # Name the model that failed (the original message always said "detection").
            print(f"Model {model_name} is not ready")
            return

    model_arch = "ViT-H-14"
    preprocess = image_transform(_MODEL_INFO[model_arch]['input_resolution'])
    # Example Pikachu image; preprocessing yields a [1, 3, resolution, resolution] tensor.
    image_batch = preprocess(Image.open("examples/pokemon.jpeg")).unsqueeze(0).numpy().astype(np.float32)

    # Un-normalized image features from the image tower.
    image_features = _infer(
        triton_client, "cn_clip_img", "image", image_batch, "FP32", "unnorm_image_features")
    # Normalized Chinese-CLIP image features, ready for downstream tasks.
    image_features /= image_features.norm(dim=-1, keepdim=True)
    print(image_features.shape)  # Torch Tensor shape: [1, feature_dim]

    text = clip.tokenize(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], context_length=52)

    # Run the text tower once per caption (batch size 1 per request).
    text_features = []
    for tokens in text:
        token_batch = tokens.unsqueeze(0).numpy().astype(np.int64)
        text_features.append(_infer(
            triton_client, "cn_clip_txt", "text", token_batch, "INT64", "unnorm_text_features"))

    # Stack the 4 per-caption feature vectors into one [4, feature_dim] tensor.
    text_features = torch.squeeze(torch.stack(text_features), dim=1)
    # Normalized Chinese-CLIP text features, ready for downstream tasks.
    text_features = text_features / text_features.norm(dim=1, keepdim=True)

    # Inner product followed by softmax.
    # Contrastive training uses a temperature, so the logits must be scaled by
    # the model's logit_scale.exp(); the pretrained models use logit_scale = 4.6052,
    # hence the factor of 100. For a custom ckpt, load it with torch.load and check
    # ckpt['state_dict']['module.logit_scale'] or ckpt['state_dict']['logit_scale'].
    logits_per_image = 100 * image_features @ text_features.t()
    print(logits_per_image.softmax(dim=-1))  # image-text probs: [[1.2475e-03, 5.3037e-02, 6.7583e-04, 9.4504e-01]]


# Script entry point: run the Triton inference demo when executed directly.
if __name__ == "__main__":
    main()