# onnx infer demo 

from transformers import AutoTokenizer, AutoModel
import onnxruntime as ort
import numpy as np
import time

# Report whether this onnxruntime build targets CPU or GPU.
print(ort.get_device())

bge_model_path = "/workspace/bge-large-zh-1.5-onnx/model.onnx"

# Pin each session to a single execution provider.  Omitting `providers`
# on a GPU build of onnxruntime lets the "CPU" session silently fall back
# to CUDA, which would invalidate the CPU-vs-GPU comparison below; the
# argument is also mandatory/deprecated-to-omit since ONNX Runtime 1.9.
ort_sess_cpu = ort.InferenceSession(bge_model_path, providers=['CPUExecutionProvider'])
ort_sess_gpu = ort.InferenceSession(bge_model_path, providers=['CUDAExecutionProvider'])
providers = ort_sess_gpu.get_providers()
print(f'Available provider is : {providers}')

tokenizer = AutoTokenizer.from_pretrained("/workspace/bge-large-zh-1.5-onnx", trust_remote_code=True)


def embed_onnx_cpu(text):
    """Tokenize *text* and run it through the CPU ONNX session.

    Returns the raw session outputs: a list containing one numpy array,
    the model's ``last_hidden_state``.
    """
    encoded = tokenizer(text, return_tensors="np")
    return ort_sess_cpu.run(output_names=['last_hidden_state'], input_feed=dict(encoded))

def embed_onnx_gpu(text):
    """Tokenize *text* and run it through the CUDA ONNX session.

    Returns the raw session outputs: a list containing one numpy array,
    the model's ``last_hidden_state``.
    """
    encoded = tokenizer(text, return_tensors="np")
    return ort_sess_gpu.run(output_names=['last_hidden_state'], input_feed=dict(encoded))

# --- CPU benchmark -----------------------------------------------------
# Uses time.perf_counter() (monotonic, high-resolution — the clock meant
# for interval timing) instead of the wall clock time.time(); also fixes
# the `cast_tiem` typo and hoists the loop-invariant query string.
n_runs = 100
query_text = "hi, are you ok?"
total = 0.0
for _ in range(n_runs):
    st = time.perf_counter()
    embed_result = embed_onnx_cpu(query_text)
    # print(embed_result)
    total += time.perf_counter() - st

print("cpu infer time ", total/n_runs)

# --- GPU benchmark -----------------------------------------------------
# Same fixes as the CPU loop: perf_counter() for interval timing, the
# `cast_tiem` typo removed, and the constant query hoisted out of the loop.
# NOTE(review): the first CUDA call pays kernel/graph warm-up cost; a few
# untimed warm-up iterations would make the average fairer — TODO confirm.
n_runs = 100
query_text = "hi, are you ok?"
gpu_total = 0.0
for _ in range(n_runs):
    st = time.perf_counter()
    embed_result = embed_onnx_gpu(query_text)
    # print(embed_result)
    gpu_total += time.perf_counter() - st

print("gpu infer time ", gpu_total/n_runs)