import torch # 导入 PyTorch 库，用于深度学习任务。
import gradio as gr  # 导入Gradio库，用于构建交互式界面。
import torch.nn.functional as F
from transformers import AutoTokenizer, AutoModelForCausalLM
# Hugging Face model id: a Chinese-finetuned Llama-3 8B chat model.
model_id = "shenzhi-wang/Llama3-8B-Chinese-Chat"
tokenizer = AutoTokenizer.from_pretrained(model_id)
# torch_dtype="auto" keeps the checkpoint's native dtype; device_map="auto"
# lets accelerate place the weights on available devices (GPU if present).
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype="auto", device_map="auto"
)

# NOTE(review): `messages` is never used anywhere below — the Gradio handler
# builds its own raw prompt. Presumably left over from a chat-template
# example; confirm and remove if truly dead.
messages = [
    {"role": "user", "content": "写一首诗吧"},
]


# Inference only: switch off dropout / other training-mode behavior.
model.eval()

# Generation handler wired into the Gradio interface below.
def generate_text(prompt):
    """Generate a continuation of *prompt* with the loaded chat model.

    Args:
        prompt: User-supplied text from the Gradio text box.

    Returns:
        The decoded output (prompt plus up to 50 newly generated tokens),
        with special tokens stripped.
    """
    # Move inputs to the model's device: with device_map="auto" the weights
    # may live on GPU, while tokenizer.encode returns CPU tensors — the
    # original code crashed with a device-mismatch error on CUDA machines.
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    # Inference only — skip autograd bookkeeping.
    with torch.no_grad():
        # max_new_tokens bounds *generated* tokens; the original max_length=50
        # counted the prompt too, so long prompts produced no output at all.
        output = model.generate(
            input_ids,
            max_new_tokens=50,
            do_sample=True,
            temperature=0.7,
        )
    return tokenizer.decode(output[0], skip_special_tokens=True)

# Example prompts shown beneath the input box (mostly classical-poetry openings).
# Must be defined BEFORE gr.Interface(...) references it — the original file
# defined it afterwards, which raised NameError at import time.
examples = [
    ["不堪翘首暮云中"],
    ["开源中国"],
    ["行到水穷处"],
    ["王师北定中原日"],
    ["雪"],
    ["海上升明月"],
    ["十年磨一剑"],
]

# Build the Gradio UI: one text box in, generated text out.
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="文本生成",
    description="输入你想生成的文本，点击运行即可。",
    examples=examples,
)


if __name__ == "__main__":
    # queue() serializes concurrent requests so multiple users don't hit the
    # single model at once; launch() starts the local web server.
    # (Original used a nonstandard 3-space indent; normalized to PEP 8.)
    iface.queue().launch()