import json
import os
from urllib.request import urlopen
import torch
# NOTE: comment below referred to iterating the first test samples; kept as a translation.
from tqdm import tqdm  # progress-bar library
import tiktoken
from zhipuai import ZhipuAI
from gpt2_tools import GPTModel, generate, text_to_token_ids, token_ids_to_text


def download_and_load_file(file_path, url):
    """Download a JSON file to *file_path* (if absent) and return its parsed content.

    Args:
        file_path: Local path of the cached JSON file.
        url: Source URL, fetched only when the file does not yet exist.

    Returns:
        The parsed JSON data (here: a list of instruction entries).
    """
    if not os.path.exists(file_path):
        with urlopen(url) as response:
            text_data = response.read().decode("utf-8")
        with open(file_path, "w", encoding="utf-8") as file:
            file.write(text_data)
    # Read the cached file exactly once. The original read it twice (the
    # else-branch text read was discarded) and omitted the encoding, which
    # made json parsing locale-dependent on some platforms.
    with open(file_path, "r", encoding="utf-8") as file:
        return json.load(file)

# Instruction dataset from Sebastian Raschka's "LLMs from scratch" repo (ch. 7).
file_path = "instruction-data.json"
url = (
    "https://raw.githubusercontent.com/rasbt/LLMs-from-scratch"
    "/main/ch07/01_main-chapter-code/instruction-data.json"
)

# Downloads on first run, then reads from the local cache.
data = download_and_load_file(file_path, url)

# Split sizes: 85% train, 10% test, remainder (~5%) validation.
train_portion = int(len(data) * 0.85)
test_portion = int(len(data) * 0.1)
val_portion = len(data) - train_portion - test_portion

train_data = data[:train_portion]
test_data = data[train_portion:train_portion + test_portion]
val_data = data[train_portion + test_portion:]

# Architecture settings shared by all GPT-2 sizes; the size-specific fields
# below are merged in via BASE_CONFIG.update(...).
BASE_CONFIG = {
    "vocab_size": 50257,     # GPT-2 BPE vocabulary size
    "drop_rate": 0.0,        # dropout disabled (inference/evaluation)
    "qkv_bias": True,        # OpenAI GPT-2 checkpoints use biased QKV projections
    "context_length": 1024,  # maximum sequence length
}

# Per-size hyperparameters of the public GPT-2 checkpoints.
model_configs = {
    "gpt2-small (124M)": {"emb_dim": 768, "n_layers": 12, "n_heads": 12},
    "gpt2-medium (355M)": {"emb_dim": 1024, "n_layers": 24, "n_heads": 16},
    "gpt2-large (774M)": {"emb_dim": 1280, "n_layers": 36, "n_heads": 20},
    # Fixed key: was "gpt2-x1" (typo) — the checkpoint family is "gpt2-xl",
    # so selecting it would have raised a KeyError.
    "gpt2-xl (1558M)": {"emb_dim": 1600, "n_layers": 48, "n_heads": 25},
}

CHOOSE_MODEL = "gpt2-medium (355M)"
BASE_CONFIG.update(model_configs[CHOOSE_MODEL])
# Use the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = GPTModel(BASE_CONFIG)
model.to(device)
# Load the instruction-fine-tuned GPT-2 medium weights from a local
# checkpoint file; map_location keeps this working on CPU-only machines.
model_state_dict = torch.load("gpt2-medium355M-sft.pth", map_location=device)
model.load_state_dict(model_state_dict)
# GPT-2 byte-pair-encoding tokenizer (matches vocab_size 50257 above).
tokenizer = tiktoken.get_encoding("gpt2")
def format_input(entry):
    """Build the Alpaca-style prompt for one dataset entry.

    The instruction section is always present; the input section is
    appended only when the entry's "input" field is non-empty.
    """
    parts = [
        "Below is an instruction that describes a task. "
        "Write a response that appropriately completes the request."
        f"\n\n### Instruction:\n{entry['instruction']}"
    ]
    if entry["input"]:
        parts.append(f"\n\n### Input:\n{entry['input']}")
    return "".join(parts)

# Fix the random seed so results are reproducible across runs.
torch.manual_seed(123)



# One-time generation pass: iterate the test set and record each model
# response. Kept commented out because the responses have already been
# saved to instruction-data-with-response.json (loaded below).
# for i, entry in tqdm(enumerate(test_data), total=len(test_data)):
#     # Format the input prompt
#     input_text = format_input(entry)
#
#     # Generate token IDs
#     token_ids = generate(
#         model=model,  # model to run
#         idx=text_to_token_ids(input_text, tokenizer).to(device),  # encode prompt and move to device
#         max_new_tokens=256,  # maximum number of newly generated tokens
#         context_size=BASE_CONFIG["context_length"],  # context window size
#         eos_id=50256  # end-of-sequence token ID
#     )
#
#     # Decode the generated token IDs back to text
#     generated_text = token_ids_to_text(token_ids, tokenizer)
#
#     # Extract and clean the generated answer text
#     response_text = (
#         generated_text[len(input_text):]  # drop the prompt portion
#         .replace("### Response:", "")  # remove the section marker
#         .strip()  # trim surrounding whitespace
#     )
#
#     # Store the model response on the test_data entry
#     test_data[i]["model_response"] = response_text
#
# # Save the updated data (now including model responses) to a JSON file
# with open("instruction-data-with-response.json", "w") as file:
#     json.dump(test_data, file, indent=4)

# Load the previously generated model responses for scoring. The encoding is
# given explicitly: the original relied on the locale default, which can fail
# to decode UTF-8 JSON on some platforms (e.g. Windows cp1252).
with open("instruction-data-with-response.json", "r", encoding="utf-8") as file:
    test_data = json.load(file)

def query_model(prompt, model="glm-4-flash"):
    """Send *prompt* to the ZhipuAI chat API and return the reply text.

    Args:
        prompt: The user message to send.
        model: ZhipuAI model name (defaults to "glm-4-flash", as before).

    Returns:
        The content string of the first chat-completion choice.

    Raises:
        RuntimeError: If the ZHIPUAI_API_KEY environment variable is unset.
    """
    # SECURITY FIX: the API key was hard-coded in source (a leaked credential).
    # Read it from the environment so secrets never live in version control.
    api_key = os.environ.get("ZHIPUAI_API_KEY")
    if not api_key:
        raise RuntimeError("Set the ZHIPUAI_API_KEY environment variable.")
    client = ZhipuAI(api_key=api_key)
    response = client.chat.completions.create(
        model=model,
        messages=[
            {"role": "user", "content": prompt}
        ],
    )
    # The per-call debug print of the raw message object was removed: it
    # flooded stdout during the scoring loop.
    return response.choices[0].message.content

def generate_model_scores(json_data, json_key, model="llama3"):
    """Score each entry's model response using the judge LLM.

    Args:
        json_data: Entries with "instruction", "input", "output", and the
            response field named by *json_key*.
        json_key: Key holding the model-generated response to be scored.
        model: Judge-model name — currently unused (query_model selects the
            backend); kept so existing callers are unaffected.

    Returns:
        List of integer scores (0-100). Replies that cannot be parsed as an
        integer are reported and skipped rather than aborting the run.
    """
    scores = []
    for entry in tqdm(json_data, desc="Scoring entries"):
        prompt = (
            f"Given the input {format_input(entry)} and correct output {entry['output']}, "
            f"score the model response {entry[json_key]} "
            f"on a scale from 0 to 100, where 100 is the best score. "
            f"Respond with the integer number only."
        )
        score = query_model(prompt)
        try:
            # Tolerate surrounding whitespace/newlines in the judge's reply.
            scores.append(int(str(score).strip()))
        except ValueError:
            # Skip unparseable replies (the trailing `continue` was redundant).
            print(f"Could not convert score: {score}")
    return scores

# Score the saved test-set responses and report summary statistics.
scores = generate_model_scores(test_data, "model_response")
print(f"Number of scores: {len(scores)} of {len(test_data)}")
# Guard against ZeroDivisionError when no reply could be parsed as an integer.
if scores:
    print(f"Average score: {sum(scores)/len(scores):.2f}\n")
else:
    print("Average score: n/a (no valid scores)\n")




