# Copyright (c) 2023-2024 DeepSeek.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

import torch
import torch_mlu
import torch_mlu.utils.gpu_migration
from transformers import AutoModelForCausalLM

from janus.models import MultiModalityCausalLM, VLChatProcessor
from janus.utils.io import load_pil_images
import time  # added for performance timing

# Path to the local Janus-Pro checkpoint.
model_path = "/data/models/llm/models/Janus-Pro-7B"
vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
tokenizer = vl_chat_processor.tokenizer

# Load the multimodal model, cast to fp16, move to the accelerator, eval mode.
# NOTE(review): `.cuda()` presumably lands on the MLU device via the
# `torch_mlu.utils.gpu_migration` import above — confirm on the target setup.
vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
    model_path, trust_remote_code=True
)
vl_gpt = vl_gpt.to(torch.float16).cuda().eval()

# Single-turn image-QA conversation in the Janus chat format.
question = "introduce the image"  # BUGFIX: was misspelled "intruduce"
image = './images/doge.png'
conversation = [
    {
        "role": "<|User|>",
        "content": f"<image_placeholder>\n{question}",
        "images": [image],
    },
    # Empty assistant turn — filled in by the model during generation.
    {"role": "<|Assistant|>", "content": ""},
]

# ====== Performance statistics helper ======
def calculate_performance(start_time, end_time, input_visual_tokens, input_text_tokens, output_tokens):
    """Print a throughput summary for one generation pass and return the rates.

    Args:
        start_time, end_time: wall-clock timestamps (seconds) bracketing generation.
        input_visual_tokens: number of image tokens in the prompt.
        input_text_tokens: number of text tokens in the prompt.
        output_tokens: number of tokens the model generated.

    Returns:
        Tuple ``(tokens_per_second, latency_per_token_ms)``; both are 0 when
        the duration or the generated-token count is not positive.
    """
    duration = end_time - start_time
    total_input_tokens = input_visual_tokens + input_text_tokens

    # Emit the summary as a single write; line content matches the original.
    print("\n".join([
        "\n===== 性能统计 =====",
        f"总处理时间: {duration:.4f} 秒",
        f"输入视觉token: {input_visual_tokens} (图像编码)",
        f"输入文本token: {input_text_tokens}",
        f"生成文本token: {output_tokens}",
        f"总处理token: {total_input_tokens + output_tokens}",
    ]))

    # Rates remain 0 unless both elapsed time and output size are positive.
    usable = duration > 0 and output_tokens > 0
    tokens_per_second = output_tokens / duration if usable else 0
    latency_per_token = duration * 1000 / output_tokens if usable else 0

    if usable:
        print(f"\n生成速度: {tokens_per_second:.2f} token/秒")
        print(f"单token延迟: {latency_per_token:.2f} ms")
    else:
        print("⚠️ 无法计算QPS：时间或生成token无效")

    print("=" * 40)
    return tokens_per_second, latency_per_token

# Load the image(s) and build the processor inputs; pipeline timing starts here.
start_time = time.time()  # overall start timestamp
# load images and prepare for inputs
pil_images = load_pil_images(conversation)
prepare_inputs = vl_chat_processor(
    conversations=conversation, images=pil_images, force_batchify=True
).to(vl_gpt.device)
preprocess_time = time.time()  # preprocessing finished

# # run image encoder to get the image embeddings
#inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
# Build the fused (visual + text) input embeddings and derive token counts.
with torch.no_grad():
    inputs_embeds = vl_gpt.prepare_inputs_embeds(**prepare_inputs)
    input_token_count = inputs_embeds.shape[1]  # total input tokens (visual + text)
    input_visual_tokens = 24 * 24  # assumes 576 visual tokens (384px / 16 patch = 24x24) — TODO confirm for this checkpoint
    input_text_token_count = inputs_embeds.shape[1] - input_visual_tokens   # remaining positions are text tokens

# Run the language model to produce the response (timed separately).
generation_start = time.time()  # generation start
# # run the model to get the response
outputs = vl_gpt.language_model.generate(
    inputs_embeds=inputs_embeds,
    attention_mask=prepare_inputs.attention_mask,
    pad_token_id=tokenizer.eos_token_id,
    bos_token_id=tokenizer.bos_token_id,
    eos_token_id=tokenizer.eos_token_id,
    max_new_tokens=512,
    do_sample=False,  # greedy decoding -> deterministic output
    use_cache=True,
)
generation_end = time.time()  # generation end

# Decode generated ids into text. NOTE(review): since `generate` received only
# `inputs_embeds` (no `input_ids`), `outputs` presumably holds just the newly
# generated ids — which is why outputs[0] is decoded as the whole answer here;
# confirm against the installed transformers version.
decoding_start = time.time()  # decoding start
answer = tokenizer.decode(outputs[0].cpu().tolist(), skip_special_tokens=True)
decoding_end = time.time()  # decoding end

# Print the formatted prompt followed by the model's answer.
print(f"{prepare_inputs['sft_format'][0]}", answer)

# ===== Debug information =====
print("\n===== 调试信息 =====")
print(f"输入序列总长度: {inputs_embeds.shape[1]}")
print(f"输入视觉token数: {input_visual_tokens}")
print(f"输入文本token数: {input_text_token_count}")
print(f"输出序列长度: {outputs.shape[1]}")

# Count of newly generated tokens.
# BUGFIX: `generate` was called with `inputs_embeds` and no `input_ids`, so the
# returned sequences contain ONLY the newly generated ids (this is also why
# outputs[0] is decoded directly as the full answer above). The old code
# subtracted input_text_token_count, which undercounted and could go negative —
# hence its now-unnecessary "<0 -> clamp to 0" guard, removed here. The old
# recompute `input_visual_tokens = inputs_embeds.shape[1] - input_text_token_count`
# was an algebraic no-op and is removed as well.
generated_tokens = outputs.shape[1]

# Report throughput/latency for the generation phase only.
qps, latency = calculate_performance(
    generation_start,
    generation_end,
    input_visual_tokens,
    input_text_token_count,
    generated_tokens
)

# ===== Wall-clock breakdown of the whole pipeline =====
total_end_time = time.time()
print("\n===== 时间分解 =====")
print(f"预处理: {(preprocess_time - start_time):.4f} 秒")
print(f"生成: {(generation_end - generation_start):.4f} 秒")
print(f"解码: {(decoding_end - decoding_start):.4f} 秒")
print(f"总耗时: {(total_end_time - start_time):.4f} 秒")
