import torch # 导入 PyTorch 库，用于深度学习任务。
import gradio as gr  # 导入Gradio库，用于构建交互式界面。
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForCausalLM
# Hugging Face Hub repo id of the Chinese chat model used for generation.
model_id = "shenzhi-wang/Llama3-8B-Chinese-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# torch_dtype="auto": use the dtype stored in the checkpoint.
# device_map="auto": let accelerate shard/place layers on available devices.
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype="auto", device_map="auto"
)


# Inference only — switch off dropout etc.
model.eval()

# 定义生成函数
def generate_text(input_text_zhengwen, input_text_dagang):
    context = ""
    if input_text_zhengwen:
        context = "前文内容是：" + input_text_zhengwen + "\n"

    if context:
        context = context + "根据前文内容续写，大纲是：" + input_text_dagang
    else:
        context = "续写，大纲是：" + input_text_dagang

    
    messages = [
        {"role": "user", "content": context},
    ]

    input_ids = tokenizer.apply_chat_template(
        messages, add_generation_prompt=True, return_tensors="pt"
    ).to(model.device)

    # input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # output = model.generate(input_ids, max_length=50, do_sample=True, temperature=0.7)
    outputs = model.generate(
        input_ids,
        max_new_tokens=8192,
        do_sample=True,
        temperature=0.6,
        top_p=0.9,
    )
    response = outputs[0][input_ids.shape[-1]:]
    return tokenizer.decode(response, skip_special_tokens=True)



# Each example row must supply one value per input component (the interface
# has two inputs), so every row is [正文, 大纲].  The sample lines read as
# continuation prompts, so they are placed in the 大纲 slot with an empty
# 正文 — TODO(review): confirm this is the intended slot.
examples = [
    ["", "不堪翘首暮云中"],
    ["", "开源中国"],
    ["", "行到水穷处"],
    ["", "王师北定中原日"],
    ["", "雪"],
    ["", "海上升明月"],
    ["", "十年磨一剑"],
]

# Build the Gradio interface: two multi-line text inputs mapped to the two
# positional parameters of generate_text, one text output.
iface = gr.Interface(
    fn=generate_text,
    inputs=[
        gr.Textbox(lines=10, placeholder="请输入原文", label="正文"),
        gr.Textbox(lines=10, placeholder="请输入续写的大纲", label="大纲")
    ],
    outputs="text",
    title="文本生成",
    description="请输入原文和大纲",
    examples=examples
)



# messages = [
#     {"role": "user", "content": "写一首诗吧"},
# ]

# input_ids = tokenizer.apply_chat_template(
#     messages, add_generation_prompt=True, return_tensors="pt"
# ).to(model.device)

# outputs = model.generate(
#     input_ids,
#     max_new_tokens=8192,
#     do_sample=True,
#     temperature=0.6,
#     top_p=0.9,
# )
# response = outputs[0][input_ids.shape[-1]:]
# print(tokenizer.decode(response, skip_special_tokens=True))



# Launch the interface
# iface.launch()


if __name__ == "__main__":
    # Enable request queuing so long generations don't time out, then serve.
    iface.queue().launch()