import os
import torch
from transformers import LlamaForCausalLM, CodeLlamaTokenizer,AutoTokenizer
from tqdm import tqdm

import json
import zipfile

# Device, dtype, and random seed setup
device = 'cuda:0'
torch_type = torch.bfloat16
torch.manual_seed(2618)

# Path configuration
# TODO: replace "checkpoint-yourcheckpoint" with the actual checkpoint directory before running
MODEL_PATH = "models/CodeLlama-7b-hf"
ADAPTER_PATH = "checkpoints/mix_cl_a50_e1_l0.0005/checkpoint-yourcheckpoint"
DATA_PATH = "data/test_set_B/Q_B_without_answer.jsonl"
OUTPUT_JSON = "final_res.json"
OUTPUT_ZIP = "final_res.zip"

# Load the model and tokenizer
def load_model_and_tokenizer():
    """Load the base CodeLlama model with its fine-tuned adapter and tokenizer.

    Returns:
        tuple: ``(model, tokenizer)`` — the model moved to ``device`` in
        ``torch_type`` precision with the adapter weights attached, and a
        ``CodeLlamaTokenizer`` configured for batched padding.
    """
    model = LlamaForCausalLM.from_pretrained(MODEL_PATH, torch_dtype=torch_type).to(device)
    # Attach the fine-tuned adapter (PEFT) weights on top of the base model.
    model.load_adapter(os.path.abspath(ADAPTER_PATH))

    tokenizer = CodeLlamaTokenizer.from_pretrained(MODEL_PATH)
    # CodeLlama ships without a dedicated pad token; reuse <unk> so that
    # padding batched inputs works.
    tokenizer.pad_token = tokenizer.unk_token
    tokenizer.pad_token_id = tokenizer.unk_token_id
    return model, tokenizer

# Build the inference function
def generate_responses(model, tokenizer, items):
    """Generate fill-in-the-middle completions for a batch of items.

    Args:
        model: causal LM supporting ``generate``.
        tokenizer: tokenizer matching the model.
        items: iterable of dicts — assumes each has 'prefix' and 'fim_suffix'
            string keys (TODO confirm against the data file schema).

    Returns:
        list[str]: one decoded completion per item, prompt tokens stripped.
    """
    # Clip context to 512 chars on each side of the <FILL_ME> marker to bound prompt length.
    prompts = [f"{item['prefix'][-512:]}<FILL_ME>{item['fim_suffix'][:512]}" for item in items]
    inputs = tokenizer(prompts, padding=True, truncation=True, return_tensors="pt").to(device)

    with torch.autocast(device_type='cuda', dtype=torch_type):
        # Pass the attention_mask along with input_ids: with padding=True,
        # generating from input_ids alone lets the model attend to pad tokens
        # and produces wrong continuations for shorter prompts in the batch.
        generated_ids = model.generate(
            **inputs,
            max_new_tokens=512,
            pad_token_id=tokenizer.pad_token_id,
        )

    # Decode only the newly generated tokens (everything past the prompt).
    return tokenizer.batch_decode(generated_ids[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True)

# Load the input data
def load_data(data_path):
    """Load a JSONL file into a list of parsed objects.

    Args:
        data_path: path to a UTF-8 JSONL file, one JSON object per line.

    Returns:
        list: parsed objects in file order. Blank lines are skipped so a
        trailing newline does not raise ``json.JSONDecodeError``.
    """
    with open(data_path, 'r', encoding='utf-8') as f:
        return [json.loads(line) for line in f if line.strip()]


# Save the output data
def save_output(output, json_path, zip_path):
    """Serialize ``output`` to JSON and package it into a ZIP archive.

    Args:
        output: JSON-serializable object (list of generated responses).
        json_path: destination path for the JSON file.
        zip_path: destination path for the ZIP archive containing the JSON.
    """
    # Write UTF-8 with ensure_ascii=False: generated code may contain
    # non-ASCII characters, which would otherwise be \u-escaped.
    with open(json_path, 'w', encoding='utf-8') as f:
        json.dump(output, f, ensure_ascii=False)
    with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zipf:
        # Store only the base name inside the archive; otherwise the entry
        # name embeds the local directory structure of json_path.
        zipf.write(json_path, arcname=os.path.basename(json_path))


# Main function
def main():
    """End-to-end pipeline: load model, run inference over the test set, save results."""
    print("加载模型和分词器...")
    model, tokenizer = load_model_and_tokenizer()

    print("加载数据...")
    items = load_data(DATA_PATH)
    print(f"数据量: {len(items)}")

    print("生成结果...")
    # One item per call, matching the original batch size of 1.
    results = [generate_responses(model, tokenizer, [item]) for item in tqdm(items)]

    print("保存输出文件...")
    save_output(results, OUTPUT_JSON, OUTPUT_ZIP)
    print("完成!")

# Script entry point.
if __name__ == "__main__":
    main()
