import json
import sys
import time
from datetime import datetime
from typing import Optional

import numpy as np
from exceptiongroup import catch

from eagle.model.ea_model_fnet import EaModel
from fastchat.model import get_conversation_template
import torch


def load_questions(path='/code/origin/eagle/data/mt_bench/question.jsonl'):
    """Load MT-Bench questions and return the first turn of each.

    Args:
        path: JSONL file where each non-blank line is a question object
            containing a "turns" list. Defaults to the original
            hard-coded benchmark location for backward compatibility.

    Returns:
        list[str]: the first turn of every question, in file order.
    """
    with open(path, "r", encoding="utf-8") as ques_file:
        # Skip blank lines defensively; the old `if line:` check passed
        # on "\n" and would have crashed json.loads.
        questions = [json.loads(line) for line in ques_file if line.strip()]
    # Only the first turn of each multi-turn question is benchmarked.
    return [question["turns"][0] for question in questions]


# Load the base model together with its EAGLE draft head for speculative
# decoding. Paths are machine-specific mount points.
model = EaModel.from_pretrained(
    base_model_path="/model/Qwen2-0.5B-Instruct",
    ea_model_path="/model/eagle-0.5B-head-14",
    # ea_model_path="/model/Qwen2-7B-Instruct-EAGLE",
    torch_dtype=torch.float32,
    low_cpu_mem_usage=True,
    device_map="auto",
    total_token=-1  # NOTE(review): -1 presumably means "auto-size the draft tree" — confirm against EaModel
)
model.eval()  # inference mode: disables dropout/batch-norm updates

# Short throwaway prompts used only for the warm-up pass below, so CUDA
# kernels and caches are initialized before anything is timed.
questions = ['quit',
             '你是',
             '我很高兴', ]

# Warm-up pass: run each short prompt once so model/CUDA initialization
# costs are paid before the timed benchmark. Results are discarded.
for i, your_message in enumerate(questions):
    try:
        # Build the chat prompt in Qwen's conversation format.
        conv = get_conversation_template("qwen")
        conv.append_message(conv.roles[0], your_message)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()
        input_ids = torch.as_tensor(model.tokenizer([prompt]).input_ids)
        if torch.cuda.is_available():
            input_ids = input_ids.cuda()
        output_ids = model.eagenerate(input_ids, temperature=0, max_new_tokens=512)
        output = model.tokenizer.decode(output_ids[0])
        print('')
    except Exception as e:
        # The original swallowed the exception silently before bailing out;
        # report it so a broken setup is visible, then stop warming up.
        print(f"Warmup failed on question {i}: {e}")
        break

questions = load_questions()

total_time = 0.0   # summed wall-clock generation time over all questions
total_token = 0    # summed count of newly generated tokens
eagle_speeds = []  # per-question generation speed, tokens/second
print(f'评测开始-时间{datetime.now().strftime("%Y/%m/%d %H:%M:%S")}')
for i, your_message in enumerate(questions):
    try:
        # Build the chat prompt in Qwen's conversation format.
        conv = get_conversation_template("qwen")
        conv.append_message(conv.roles[0], your_message)
        conv.append_message(conv.roles[1], None)
        prompt = conv.get_prompt()
        input_ids = torch.as_tensor(model.tokenizer([prompt]).input_ids)
        if torch.cuda.is_available():
            input_ids = input_ids.cuda()
        start = time.perf_counter()
        output_ids = model.eagenerate(input_ids, temperature=0, max_new_tokens=512)
        output = model.tokenizer.decode(output_ids[0])
        end = time.perf_counter()
        elapsed = end - start
        # BUG FIX: the original used len(output), which counts *characters*
        # of the decoded string (prompt included), not generated tokens.
        # Count only the tokens produced beyond the prompt.
        new_tokens = output_ids.shape[1] - input_ids.shape[1]
        total_time += elapsed
        total_token += new_tokens
        eagle_speeds.append(new_tokens / elapsed)
    except Exception as e:
        print(f"An error occurred: {e}")

# --- Report results --------------------------------------------------------
# The original label said "平均耗时" (average time) but the list holds
# per-question speeds; label corrected accordingly.
print('每个问题的生成速度（token/秒）：')
print(eagle_speeds)

if total_token > 0:
    print(f'EAGLE2-总耗时{total_time},总token{total_token},单个token平均耗时{total_time / total_token}')
else:
    # Guard: if every question failed, avoid a ZeroDivisionError.
    print('EAGLE2-未生成任何token，无法计算单个token平均耗时')
print(f'评测结束-时间{datetime.now().strftime("%Y/%m/%d %H:%M:%S")}')