import onnxruntime
import numpy
import torch
import os
from transformers import Qwen2TokenizerFast,PretrainedConfig

# Resolve all paths relative to this script's directory so the model files
# are found regardless of the current working directory.
top_directory = os.path.dirname(os.path.realpath(__file__))
cache_dir = os.path.join(top_directory, "cache_models")
if not os.path.exists(cache_dir):
    os.makedirs(cache_dir)

# Local export of Qwen1.5-0.5B-Chat: merged decoder ONNX graph plus the
# tokenizer and config JSON files it was exported with.
onnx_model_path = os.path.join(top_directory, "Qwen1.5-0.5B-Chat-ONNX", "onnx", "decoder_model_merged.onnx")
tokenizer_json_path = os.path.join(top_directory, "Qwen1.5-0.5B-Chat-ONNX", "tokenizer.json")
config_json_path = os.path.join(top_directory, "Qwen1.5-0.5B-Chat-ONNX", "config.json")
config = PretrainedConfig.from_json_file(config_json_path)
device = torch.device("cpu")

print(config)

# Model dimensions used to build the (empty) past key/value cache tensors.
num_attention_heads = config.num_attention_heads
hidden_size = config.hidden_size
num_layer = config.num_hidden_layers


# Default prompts used when no prompt list is passed to get_example_inputs().
EXAMPLE_Text = ["best hotel in bay area", "here is an example of qwen model"]

def get_tokenizer(tokenizer_json_path, cache_dir):
    """Build a Qwen2 fast tokenizer from a local ``tokenizer.json``.

    NOTE(review): ``cache_dir`` is accepted for call-site symmetry but is
    not used by this function.
    """
    qwen_tokenizer = Qwen2TokenizerFast(tokenizer_file=tokenizer_json_path)
    # Pad on the left and reuse EOS as the pad token, so batched prompts of
    # different lengths can share one input tensor.
    qwen_tokenizer.padding_side = "left"
    qwen_tokenizer.pad_token = qwen_tokenizer.eos_token
    return qwen_tokenizer

def get_example_inputs(prompt_text=EXAMPLE_Text):
    """Tokenize *prompt_text* and build the decoder's first-step inputs.

    Returns a tuple ``(input_ids, attention_mask, position_ids, empty_past)``:
    - ``input_ids`` / ``attention_mask``: int64 tensors of shape
      ``(batch, seq)`` from the padded batch encoding.
    - ``position_ids``: cumulative position of each non-pad token, with
      pad positions clamped to 0.
    - ``empty_past``: ``2 * num_layer`` float32 tensors of shape
      ``(batch, heads, 0, head_dim)`` — an empty KV cache (alternating
      key, value per layer) for generating the first token.
    """
    tokenizer = get_tokenizer(tokenizer_json_path, cache_dir)
    encodings_dict = tokenizer.batch_encode_plus(prompt_text, padding=True)

    input_ids = torch.tensor(encodings_dict["input_ids"], dtype=torch.int64)
    attention_mask = torch.tensor(encodings_dict["attention_mask"], dtype=torch.int64)
    # cumsum over the mask gives 0-based positions of real tokens; left-pad
    # positions would be -1, so clamp them to 0.
    position_ids = attention_mask.long().cumsum(-1) - 1
    position_ids.masked_fill_(position_ids < 0, 0)
    position_ids = position_ids.to(torch.int64)

    # Empty past state (sequence axis of length 0) for the first forward pass.
    batch_size = input_ids.size(0)
    past_shape = [batch_size, num_attention_heads, 0, hidden_size // num_attention_heads]
    empty_past = [
        torch.empty(past_shape, dtype=torch.float32, device=device)
        for _ in range(num_layer * 2)
    ]

    return input_ids, attention_mask, position_ids, empty_past

input_ids, attention_mask, position_ids, empty_past = get_example_inputs()

# Run the merged decoder once on CPU with an empty KV cache.
session = onnxruntime.InferenceSession(onnx_model_path, providers=["CPUExecutionProvider"])
ort_inputs = {
    "input_ids": numpy.ascontiguousarray(input_ids.cpu().numpy()),
    "attention_mask": numpy.ascontiguousarray(attention_mask.cpu().numpy()),
    "position_ids": numpy.ascontiguousarray(position_ids.cpu().numpy()),
}
# empty_past alternates key/value per layer (even index = key, odd = value),
# matching the exported graph's input names past_key_values.<layer>.<kind>.
for i, past_i in enumerate(empty_past):
    kind = "key" if i % 2 == 0 else "value"
    ort_inputs[f"past_key_values.{i // 2}.{kind}"] = numpy.ascontiguousarray(past_i.cpu().numpy())
print(f"{ort_inputs['input_ids']}")
print(f"{ort_inputs['attention_mask']}")
print(f"{ort_inputs['position_ids']}")
print(f"{ort_inputs['past_key_values.1.key']}")
ort_outputs = session.run(None, ort_inputs)

tokenizer = get_tokenizer(tokenizer_json_path, cache_dir)

# BUG FIX: session.run returns a Python *list* of numpy arrays, so calling
# .astype on it raised AttributeError. Take the first output (the logits,
# presumably shaped (batch, seq, vocab) — confirm against the export) and
# cast that instead.
ort_outputs = ort_outputs[0].astype(numpy.float32)
print(ort_outputs.shape)