from transformers import LlamaForCausalLM, CodeLlamaTokenizer, AutoTokenizer
from tqdm import tqdm
import torch
import json
import os
import json
import zipfile
from train_eval_args import eval_parser
from gen_code import get_ans

def main():
    """Run LoRA-adapted Llama inference over a JSONL file and save the answers.

    Reads one JSON object per line from ``args.input_file``, generates answers
    with the model + adapter via ``get_ans``, then writes the result to
    ``<output_file>.json`` and a deflate-compressed ``<output_file>.zip``
    containing that JSON file.
    """
    parser = eval_parser()
    args = parser.parse_args()

    device = args.device
    # bf16 halves memory vs fp32 while avoiding fp16 overflow issues.
    torch_type = torch.bfloat16

    # Base model with the fine-tuned adapter loaded on top.
    model = LlamaForCausalLM.from_pretrained(args.model_path, torch_dtype=torch_type).to(device)
    model.load_adapter(os.path.abspath(args.adapter_path))

    # Llama tokenizers ship without a pad token; reuse <unk> for padding.
    # NOTE(review): assumes the tokenizer defines an unk token — confirm for
    # the checkpoints this script is used with.
    tokenizer = AutoTokenizer.from_pretrained(args.model_path)
    tokenizer.pad_token = tokenizer.unk_token
    tokenizer.pad_token_id = tokenizer.unk_token_id

    # Input is JSONL: one JSON object per line.
    with open(args.input_file, 'r', encoding='utf-8') as f:
        data = [json.loads(line) for line in f]
    print(len(data))

    output = get_ans(data, model, tokenizer, torch_type)

    # Fix: write with explicit UTF-8 (the original relied on the platform
    # default encoding) and keep non-ASCII text readable in the output.
    json_path = f'{args.output_file}.json'
    with open(json_path, 'w', encoding='utf-8') as f:
        json.dump(output, f, ensure_ascii=False)

    # Fix: store the member under its basename so the archive does not embed
    # the directory path contained in args.output_file.
    with zipfile.ZipFile(f"{args.output_file}.zip", "w", zipfile.ZIP_DEFLATED) as zipf:
        zipf.write(json_path, arcname=os.path.basename(json_path))

if __name__ == '__main__':
    main()