from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Path where the fine-tuned model was saved (the "results" folder).
model_path = "./results/xiaofu"

# Load the tokenizer from the saved checkpoint.
tokenizer = AutoTokenizer.from_pretrained(model_path)

# Some causal-LM tokenizers ship without a pad token; fall back to EOS so
# that generate() can pad and terminate sequences consistently.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
    tokenizer.pad_token_id = tokenizer.eos_token_id

# Load the model and switch to evaluation mode (disables dropout etc.
# during inference).
model = AutoModelForCausalLM.from_pretrained(model_path)
model.eval()

# Prepare the input text.
input_text = "你是哪个AI？"
# Take both input_ids and the attention mask directly from the tokenizer
# output instead of hand-building a ones mask — identical for a single
# unpadded sequence, but stays correct if padding is ever introduced.
encoded = tokenizer(input_text, return_tensors="pt")
input_ids = encoded.input_ids
attention_mask = encoded.attention_mask

try:
    # Sanity-check the model's output distribution before generating.
    # no_grad avoids building an autograd graph for pure inference.
    with torch.no_grad():
        outputs = model(input_ids, attention_mask=attention_mask)
    logits = outputs.logits
    probs = torch.softmax(logits, dim=-1)

    # NaN/Inf/negative probabilities indicate a broken or corrupted checkpoint.
    if torch.isnan(probs).any() or torch.isinf(probs).any() or (probs < 0).any():
        print("概率分布存在异常值！")
    else:
        # Generate a reply. do_sample=True is required here: without it the
        # model decodes greedily and temperature/top_k/top_p are silently
        # ignored (transformers only emits a warning).
        with torch.no_grad():
            output = model.generate(
                input_ids,
                attention_mask=attention_mask,
                do_sample=True,   # enable sampling so the knobs below take effect
                temperature=0.7,  # lower temperature -> more deterministic output
                top_k=100,        # keep only the 100 most likely tokens
                top_p=0.9,        # nucleus-sampling probability threshold
                max_length=500,   # max total length (prompt + generated tokens)
                pad_token_id=tokenizer.pad_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )

        output_text = tokenizer.decode(output[0], skip_special_tokens=True)
        print(output_text)
except Exception as e:
    # Top-level boundary for this script: report the failure instead of crashing.
    print(f"生成过程中出现错误: {e}")