from transformers import LlamaForCausalLM, CodeLlamaTokenizer
from tqdm import tqdm
import torch
import json
import os
import requests
import json
import zipfile

torch.manual_seed(2618)

device = 'cuda:0'
torch_type = torch.bfloat16

# Base CodeLlama-7B checkpoint plus a locally fine-tuned adapter.
tokenizer = CodeLlamaTokenizer.from_pretrained("/data02/models/CodeLlama-7b-hf")
model = LlamaForCausalLM.from_pretrained("/data02/models/CodeLlama-7b-hf", torch_dtype=torch_type).to(device)
# Presumably a PEFT/LoRA adapter checkpoint — TODO confirm adapter format.
model.load_adapter(os.path.abspath("/data02/users/lz/code/game/atom/checkpoints/mix_cl_a50_e10_l5e-4/checkpoint-1400"))
# The Llama tokenizer has no pad token; reuse <unk> so batched padding works.
tokenizer.pad_token = tokenizer.unk_token
tokenizer.pad_token_id = tokenizer.unk_token_id
# Decoder-only models must be LEFT-padded for batched generate(): with the
# default right padding, pad tokens would sit between prompt and continuation
# and the later slice generated_ids[:, input_len:] would be wrong. No effect
# on batch-size-1 calls (no padding is added), fixes batches > 1.
tokenizer.padding_side = 'left'

def infer(items):
    """Run batched fill-in-the-middle inference.

    Args:
        items: iterable of dicts with string fields 'prefix' and 'fim_suffix'
            (assumed from usage — TODO confirm against the JSONL schema).

    Returns:
        List of generated infill strings, one per input item.
    """
    # Clamp context on both sides of the infill marker: last 512 chars of the
    # prefix, first 512 chars of the suffix.
    prompts = [
        f"{item['prefix'][-512:]}<FILL_ME>{item['fim_suffix'][:512]}"
        for item in items
    ]
    # Keep the whole encoding, not just input_ids: generate() needs the
    # attention_mask to distinguish padding from real tokens — especially here,
    # where pad_token is aliased to unk_token and cannot be inferred.
    enc = tokenizer(prompts, padding=True, truncation=True, return_tensors="pt").to(model.device)

    with torch.autocast(device_type='cuda', dtype=torch_type):
        generated_ids = model.generate(
            input_ids=enc["input_ids"],
            attention_mask=enc["attention_mask"],
            max_new_tokens=512,
        )

    # Drop the prompt tokens and decode only the newly generated infill.
    prompt_len = enc["input_ids"].shape[1]
    filling = tokenizer.batch_decode(generated_ids[:, prompt_len:], skip_special_tokens=True)
    return filling

if __name__ == '__main__':
    # Load one JSON object per line from the evaluation set.
    data = []
    with open('data/Q_B_without_answer.jsonl', 'r', encoding='utf-8') as f:
        for line in f:
            data.append(json.loads(line))
    print(len(data))

    # Run inference one item at a time.
    # NOTE(review): infer() returns a list, so each element of `output` is a
    # single-element list — confirm downstream consumers expect this nesting.
    output = []
    for item in tqdm(data):
        output.append(infer([item]))

    # Persist the results and zip them (presumably for submission).
    # encoding specified for symmetry with the read above; the default
    # ensure_ascii=True keeps the payload ASCII-safe either way.
    with open('final_res.json', 'w', encoding='utf-8') as f:
        json.dump(output, f)
    with zipfile.ZipFile("final_res.zip", "w", zipfile.ZIP_DEFLATED) as zipf:
        zipf.write("final_res.json")
