import torch

from langchain_community.document_loaders import UnstructuredHTMLLoader
from transformers import AutoModelForCausalLM, AutoTokenizer


# Load the chapter text out of the HTML file and wrap it in a Chinese
# prompt: the model is asked to act as a novel critic and summarize the
# characters (name + what they did) as a JSON array of objects.
chapter_path = "/mnt/workspace/data/novels/wzzs/chapter3.html"

html_loader = UnstructuredHTMLLoader(chapter_path)
documents = html_loader.load()
chapter_text = documents[0].page_content

# `text` is consumed further down when the prompt is tokenized.
text = (
    "你的角色是一个小说评论家。接下来这段以三个反引号括起来的文本是一篇"
    "以第一人称写作的小说的序章：\n\n"
    f"```{chapter_text}```"
    "\n\n请你总结其中出现了哪些人物，他们的称呼是什么，他们做了那些事情，"
    "以json格式输出一个数组，数组的每个元素是一个对象，该对象有两个字段，"
    "第一个字段name代表人物的称呼，第二个字段content代表人物做的事情"
)

print(text)

# Select the accelerator only when one exists. The original hard-coded
# "cuda", which raises at the later `.to(device)` call on CPU-only hosts.
device = "cuda" if torch.cuda.is_available() else "cpu"

qwen2_path = "/mnt/workspace/llm/Qwen2-7B-Instruct/"
# torch_dtype="auto" takes the checkpoint's native dtype;
# device_map="auto" lets accelerate place/shard the weights automatically.
model = AutoModelForCausalLM.from_pretrained(
    qwen2_path,
    torch_dtype="auto",
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained(qwen2_path)

# Qwen2-7B-Instruct is a chat-tuned model: wrap the prompt with the
# tokenizer's chat template (system + user turns, plus the assistant
# header via add_generation_prompt=True) so the model sees the special
# tokens it was fine-tuned on. The original fed the raw string — the
# template code was present but commented out.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": text},
]
prompt_text = tokenizer.apply_chat_template(
    messages,
    tokenize=False,
    add_generation_prompt=True,
)

# Move the encoded inputs to wherever accelerate placed the model's
# embedding layer; with device_map="auto" this is more reliable than a
# hard-coded device name.
model_inputs = tokenizer([prompt_text], return_tensors="pt").to(model.device)

print("===================================")

print(model_inputs)

# Generate 5 completions of the same prompt.
# NOTE(review): repeated runs only differ if the model's generation
# config enables sampling — confirm against the checkpoint's
# generation_config.json.
for _ in range(5):
    generated_ids = model.generate(
        model_inputs.input_ids,
        attention_mask=model_inputs.attention_mask,
        max_new_tokens=512,
    )
    # Strip the echoed prompt tokens, keeping only the new completion.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]

    responses = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)

    print(">>>>>>>>>>>>>>>>>>>>>>>>>>>>")

    print(responses[0])
