import os
import torch
from transformers import LlamaForCausalLM,AutoTokenizer,CodeLlamaTokenizer

import json
import zipfile


# 加载模型和分词器
# Load the model and tokenizer
def load_mod_tok(PA, ADAPTER, torch_type, device):
    """Load a CodeLlama causal-LM plus a fine-tuned adapter and its tokenizer.

    Args:
        PA: path or hub id of the base pretrained model / tokenizer.
        ADAPTER: path to the adapter weights (resolved to an absolute path
            before loading).
        torch_type: torch dtype to load the model weights in
            (e.g. ``torch.float16``).
        device: device the model is moved to (e.g. ``"cuda"``).

    Returns:
        (model, tokenizer) tuple ready for generation.
    """
    mod = LlamaForCausalLM.from_pretrained(PA, torch_dtype=torch_type).to(device)
    mod.load_adapter(os.path.abspath(ADAPTER))
    tok = CodeLlamaTokenizer.from_pretrained(PA)
    # CodeLlama has no dedicated pad token; reuse <unk> so batched
    # padding works without touching the eos token.
    tok.pad_token = tok.unk_token
    tok.pad_token_id = tok.unk_token_id
    return mod, tok


# 构建推理函数
# Build the inference function
def infer(model, into, things, way, ph, max_length=512, prefix_token=None, suffix_token=None, middle_token=None, eos_token=None, **kwargs):
    """Run batched fill-in-the-middle generation with a CodeLlama model.

    Args:
        model: causal LM exposing ``generate``.
        into: tokenizer (callable + ``batch_decode``).
        things: iterable of dicts with ``'prefix'`` and ``'fim_suffix'`` keys.
        way: autocast dtype (e.g. ``torch.float16``).
        ph: device the input ids are moved to.
        max_length: character budget for the prefix/suffix context and the
            token budget for generation (previously hard-coded to 512; the
            default preserves that behavior).
        prefix_token/suffix_token/middle_token/eos_token, **kwargs: accepted
            for interface compatibility, currently unused — CodeLlama's
            tokenizer expands the literal ``<FILL_ME>`` sentinel itself.

    Returns:
        List of generated middle segments, one per entry in ``things``
        (only newly generated tokens are decoded, prompts are stripped).
    """
    # Keep the last max_length chars of the prefix and the first
    # max_length chars of the suffix around the infill sentinel.
    prompts = [f"{th['prefix'][-max_length:]}<FILL_ME>{th['fim_suffix'][:max_length]}" for th in things]
    input_ids = into(prompts, padding=True, truncation=True, return_tensors="pt")["input_ids"].to(ph)

    with torch.autocast(device_type='cuda', dtype=way):
        out_ids = model.generate(input_ids, max_new_tokens=max_length)

    # Slice off the prompt tokens so only the completion is decoded.
    return into.batch_decode(out_ids[:, input_ids.shape[1]:], skip_special_tokens=True)

    
# 加载输入数据
# Load the input data
def ta_lo(way):
    """Read a JSON-Lines file and return its records as a list.

    Args:
        way: path to a UTF-8 encoded file with one JSON object per line.

    Returns:
        List of the parsed objects, in file order.
    """
    with open(way, 'r', encoding='utf-8') as handle:
        return [json.loads(line) for line in handle]

def save_output(outfile, put):
    """Dump ``put`` as JSON to ``<outfile>.json`` and zip it to ``<outfile>.zip``.

    Args:
        outfile: output path stem (no extension); both files are written
            next to each other.
        put: any JSON-serializable object.
    """
    with open(f'{outfile}.json', 'w', encoding='utf-8') as x:
        json.dump(put, x)
    with zipfile.ZipFile(f"{outfile}.zip", "w", zipfile.ZIP_DEFLATED) as zipx:
        # arcname keeps the archive member flat — without it, any
        # directory components of `outfile` leak into the zip layout.
        zipx.write(f"{outfile}.json", arcname=os.path.basename(f"{outfile}.json"))