from transformers import AutoModelForCausalLM, AutoTokenizer

from langchain_community.document_loaders import UnstructuredHTMLLoader

import torch

# Prompt fragments (Chinese). role_str sets the persona ("you are a novel
# critic"); quote_str introduces chapter text delimited by triple backticks;
# quote2_str introduces the previous-chapter summary delimited by triple ">";
# the two query_* strings are the questions appended to the assembled prefix.
role_str = "你是一个小说评论家。\n\n"

quote_str = "下面这段以三个反引号括起来的文本是一篇以第一人称写作的小说的一个章节：\n\n"

quote2_str = "下面这段以三个大于号括起来的文本是前文的梗概：\n\n"

query_chara_str = "请你总结这一章节出现了哪些人物，他们的称呼是什么，他们做了那些事情，以json格式输出一个数组，数组的每个元素是一个对象，该对象有两个字段，第一个字段name代表人物的称呼，第二个字段content代表人物做的事情"

query_summary_str = "请你总结这一章节的内容，输出这一章节的梗概"

def gen_content_prompt(last, curr):
    """Build the shared prompt prefix for one chapter.

    Args:
        last: accumulated summary of preceding chapters ("" for the first).
        curr: full text of the current chapter.

    Returns:
        role + backtick-quoted chapter text, plus — when ``last`` is
        non-empty — the ">>>"-quoted summary section.
    """
    prompt = role_str + quote_str + "```\n\n" + curr + "\n\n```\n\n"
    if last:
        # BUG FIX: the ">>>"-quoted summary section must be introduced by
        # quote2_str (which explains the ">>>" delimiters); the original
        # reused quote_str, mislabeling the summary as a backtick-quoted
        # chapter. quote2_str was otherwise never used.
        prompt += quote2_str + ">>>\n\n" + last + "\n\n>>>\n\n"
    return prompt

# Path template for the chapter HTML files: file_prefix + chapter number +
# file_suffix (e.g. .../chapter3.html).
file_prefix = "/mnt/workspace/data/novels/wzzs/chapter"
file_suffix = ".html"

# Device the tokenized inputs are moved to; the model's own placement is
# handled by device_map="auto" below.
device = "cuda"
qwen2_path = "/mnt/workspace/llm/Qwen2-1.5B-Instruct/"
model = AutoModelForCausalLM.from_pretrained(
    qwen2_path,
    torch_dtype="auto",  # take the dtype from the checkpoint config
    device_map="auto"  # let accelerate place/shard the weights
)
tokenizer = AutoTokenizer.from_pretrained(qwen2_path)

last_summary_combine = ""

def _generate(prompt):
    """Run one generation round and return the decoded completion.

    Tokenizes ``prompt``, generates up to 512 new tokens, strips the echoed
    prompt tokens from the output, then frees the intermediate tensors and
    empties the CUDA cache to keep peak GPU memory down between rounds.

    NOTE(review): this feeds the raw prompt to an -Instruct checkpoint;
    presumably tokenizer.apply_chat_template would yield better results —
    left unchanged to preserve behavior.
    """
    model_inputs = tokenizer([prompt], return_tensors="pt").to(device)
    generated_ids = model.generate(
        model_inputs.input_ids,
        attention_mask=model_inputs.attention_mask,
        max_new_tokens=512
    )
    # Keep only the newly generated tokens (drop the echoed prompt).
    outputonly_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(outputonly_ids, skip_special_tokens=True)[0]
    del model_inputs, generated_ids, outputonly_ids
    torch.cuda.empty_cache()
    return response

# Chapters 3..13: for each, (1) extract a JSON character list, (2) summarize
# the chapter and append the summary to the rolling context for the next one.
for c in range(3, 14):
    # Log CUDA memory before each chapter to watch for leaks across rounds.
    print(torch.cuda.memory_stats())
    print(torch.cuda.memory_allocated())
    print(torch.cuda.memory_reserved())

    loader = UnstructuredHTMLLoader(file_prefix + str(c) + file_suffix)
    data = loader.load()
    curr_chapter = data[0].page_content

    # BUG FIX: gen_content_prompt is declared as (last, curr); the original
    # called it with (curr_chapter, last_summary_combine), swapping chapter
    # and summary — the chapter text landed in the ">>>" summary slot.
    # The same prefix was also computed twice; build it once.
    prefix = gen_content_prompt(last_summary_combine, curr_chapter)

    # Round 1: JSON array of characters and what they did.
    print(_generate(prefix + query_chara_str))

    # Round 2: chapter summary, accumulated as context for later chapters.
    last_summary_combine += _generate(prefix + query_summary_str) + "\n\n"

print(last_summary_combine)
