from transformers import AutoTokenizer, AutoModelForCausalLM

from config import OUTPUT_DIR, MODEL

# Load the fine-tuned tokenizer/model from the training output directory.
# (Swap OUTPUT_DIR for MODEL below to probe the base model instead.)
tokenizer = AutoTokenizer.from_pretrained(OUTPUT_DIR)
model = AutoModelForCausalLM.from_pretrained(OUTPUT_DIR, device_map="auto").eval()
# model = AutoModelForCausalLM.from_pretrained(MODEL, device_map="auto").eval()

# Fill-in-the-middle (FIM) probe: given a C++ file header as the prefix and an
# empty suffix, the model should complete the constructor definition.
prefix_text = """#include "chainupdater/changefloorchainupdater.h"
#include "document/vkdocument.h"
#include "include/parameterids.h"
#include "include/parametervalue.h"
#include "include/vkmappingelement.h"
#include "manager/floormanager.h"
#include "manager/relatecompmanager.h"
#include "utils/buildingutil.h"
#include <utils/dbwarning.h>
#include <utils/floorutil.h>
#include <utils/levelutils.h>

ChangeFloorChainUpdaterCallback::ChangeFloorChainUpdaterCallback"""
suffix_text = ""
input_text = "<|fim_prefix|>" + prefix_text + "<|fim_suffix|>" + suffix_text + "<|fim_middle|>"

# Send inputs to wherever device_map="auto" placed the model — the original
# hard-coded "cuda", which breaks on CPU-only hosts and can mismatch sharded
# placement.
model_inputs = tokenizer([input_text], return_tensors="pt").to(model.device)

# Token ids that should terminate generation (FIM sentinels, EOS, PAD).
# NOTE(review): the original list repeated 151662 and 151664; deduplicated
# while preserving order — presumably Qwen-family special-token ids, confirm
# against the tokenizer config.
eos_token_ids = [151664, 151662, 151659, 151660, 151661, 151663, 151645, 151643]

generated_ids = model.generate(
    **model_inputs,  # pass attention_mask along with input_ids (silences HF warning)
    max_new_tokens=128,
    do_sample=False,  # greedy decoding: always emit the highest-probability token
    eos_token_id=eos_token_ids,
)[0]

# Drop the prompt tokens and decode only the newly generated "middle" span.
output_text = tokenizer.decode(
    generated_ids[model_inputs.input_ids.shape[1]:], skip_special_tokens=True
)

print("=" * 50)
print(output_text)
print("=" * 50)