import os
import torch
from transformers import LlamaForCausalLM, CodeLlamaTokenizer,AutoTokenizer
from tqdm import tqdm

import json
import zipfile

# Load the configuration file
# TODO
def load_config(config_path="./test_conf.json"):
    """Read a JSON configuration file and return its contents as a dict."""
    with open(config_path, 'r', encoding='utf-8') as cfg_file:
        return json.load(cfg_file)

# Load the model and tokenizer
def load_model(MODEL_PATH, ADAPTER_PATH, torch_type, device):
    """Load the base causal LM in the given dtype, move it to *device*,
    then attach the adapter found at ADAPTER_PATH."""
    base = LlamaForCausalLM.from_pretrained(MODEL_PATH, torch_dtype=torch_type)
    base = base.to(device)
    # load_adapter resolves relative paths unreliably; hand it an absolute one.
    base.load_adapter(os.path.abspath(ADAPTER_PATH))
    return base

def load_token(MODEL_PATH):
    """Build the CodeLlama tokenizer, using the <unk> token for padding
    so that batched prompts can be padded to a common length."""
    tokenizer = CodeLlamaTokenizer.from_pretrained(MODEL_PATH)
    tokenizer.pad_token = tokenizer.unk_token
    tokenizer.pad_token_id = tokenizer.unk_token_id
    return tokenizer



# Inference helper
def generate_responses(model, tokenizer, items, torch_type, device):
    """Run fill-in-the-middle generation for a batch of items.

    Each item is expected to carry a 'prefix' and a 'fim_suffix' string
    (TODO confirm against the dataset schema); the prompt keeps the last
    512 characters of the prefix and the first 512 of the suffix around
    the <FILL_ME> infill marker.

    Returns the list of decoded completions (prompt tokens stripped).
    """
    prompts = [f"{item['prefix'][-512:]}<FILL_ME>{item['fim_suffix'][:512]}" for item in items]
    encoded = tokenizer(prompts, padding=True, truncation=True, return_tensors="pt").to(device)

    # BUG FIX: with padding=True the attention_mask must be forwarded to
    # generate(); passing only input_ids lets the model attend to pad
    # tokens, corrupting completions for the shorter prompts in a batch.
    # Also derive autocast's device_type from *device* instead of
    # hard-coding 'cuda'.
    device_type = 'cuda' if 'cuda' in str(device) else 'cpu'
    with torch.autocast(device_type=device_type, dtype=torch_type):
        generated_ids = model.generate(
            input_ids=encoded["input_ids"],
            attention_mask=encoded["attention_mask"],
            max_new_tokens=512,
        )

    prompt_len = encoded["input_ids"].shape[1]
    return tokenizer.batch_decode(generated_ids[:, prompt_len:], skip_special_tokens=True)

# Load the input data
def load_data(data_path):
    """Read a JSON-Lines file: one JSON object per line, returned as a list."""
    with open(data_path, 'r', encoding='utf-8') as data_file:
        return [json.loads(line) for line in data_file]
    


def save_output(output, json_path, zip_path):
    """Write *output* as pretty-printed JSON, then archive that file into a zip."""
    abs_json = os.path.abspath(json_path)
    abs_zip = os.path.abspath(zip_path)

    with open(abs_json, 'w', encoding='utf-8') as out_file:
        json.dump(output, out_file, ensure_ascii=False, indent=4)

    # Hand ZipFile absolute paths so the member is resolved correctly;
    # arcname keeps only the basename inside the archive.
    with zipfile.ZipFile(abs_zip, "w", zipfile.ZIP_DEFLATED) as archive:
        archive.write(abs_json, arcname=os.path.basename(abs_json))



# Main entry point
def main():
    """Entry point: load config, model, tokenizer and data, run FIM
    inference item by item, then save the results as JSON + zip."""
    config = load_config("./test_conf.json")  # read the configuration file

    device = config["DEVICE"]
    # Map the configured dtype name to a torch dtype. Unknown names fall
    # back to float32 (same as before), but "float16" is now honored
    # instead of being silently treated as float32.
    dtype_map = {"bfloat16": torch.bfloat16, "float16": torch.float16}
    torch_type = dtype_map.get(config["TORCH_TYPE"], torch.float32)
    torch.manual_seed(config["SEED"])

    model = load_model(
        config["MODEL_PATH"],
        config["ADAPTER_PATH"],
        torch_type,
        device)
    token = load_token(config["MODEL_PATH"])

    data = load_data(config["DATA_PATH"])

    # One item per call keeps memory bounded and preserves output order.
    output = []
    for item in tqdm(data):
        output.append(generate_responses(model, token, [item], torch_type, device))

    save_output(output, config["OUTPUT_JSON"], config["OUTPUT_ZIP"])

if __name__ == "__main__":
    main()
