from transformers import AutoModelForCausalLM, AutoTokenizer, AutoConfig, LogitsProcessor
import torch

# Local path of the distilled DeepSeek-R1 Qwen-1.5B checkpoint.
model_path = "./models/deepseek-ai/DeepSeek-R1-Distill-Qwen-1___5B"

# 1. Adjust the model configuration: turn off sliding-window attention so
# every position attends over the full causal context.
config = AutoConfig.from_pretrained(model_path)
config.use_sliding_window_attention = False  # disable sliding-window attention
config.sliding_window = None  # NOTE(review): assumes None means "no window" for this model's attention impl — confirm

# 2. Load the tokenizer with left padding (required for decoder-only
# generation so the prompt ends flush against the generated tokens).
tokenizer = AutoTokenizer.from_pretrained(
    model_path,
    padding_side="left"
)
# If no pad token is defined, reuse EOS instead of introducing a brand-new
# "[PAD]" token: a new token would receive an id outside the model's
# embedding matrix (no resize_token_embeddings() call anywhere in this
# script), causing an index error at embedding lookup on padded inputs.
# Reusing EOS is safe together with left padding and an attention mask.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

# 3. Load the model on CPU in full fp32 precision; fp32 avoids the
# overflow/NaN issues that motivate the StabilityCheck processor below.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    config=config,
    torch_dtype=torch.float32,
    low_cpu_mem_usage=True,
    device_map="cpu"
)
model.config.pad_token_id = tokenizer.pad_token_id  # keep model config in sync with the tokenizer's pad token
model.eval()  # inference mode: disables dropout etc.


# 4. Define a stability logits processor
class StabilityCheck(LogitsProcessor):
    """Sanitize logits before sampling.

    NaN scores become 0, infinities are capped at +/-1e4, and the result
    is clamped into [-50, 50] to keep softmax numerically well-behaved.
    """

    def __call__(self, input_ids, scores):
        """Return a cleaned copy of `scores`; `input_ids` is unused."""
        cleaned = torch.nan_to_num(scores, nan=0.0, posinf=1e4, neginf=-1e4)
        return cleaned.clamp(min=-50.0, max=50.0)


# 5. Generation configuration.
# NOTE: `model.generation_config` is mutated in place here, so these
# settings also apply to any later generate() call on this model even
# when the config object is not passed explicitly.
generation_config = model.generation_config
generation_config.update(
    max_new_tokens=1000,
    do_sample=True,
    temperature=0.7
)

# 6. Run generation.
prompt = "告诉我如何训练狗狗坐下"
# A single prompt needs no fixed-length padding: padding="max_length" to
# 512 tokens only adds pad tokens the model must attend-mask at every
# step. Tokenize the prompt as-is, truncated to 512 tokens for safety.
inputs = tokenizer(
    prompt,
    return_tensors="pt",
    max_length=512,
    truncation=True,
    return_attention_mask=True
)

with torch.no_grad():
    outputs = model.generate(
        input_ids=inputs.input_ids,
        attention_mask=inputs.attention_mask,
        # max_new_tokens already lives in generation_config; passing it
        # again as a kwarg was redundant (same value, kwarg wins anyway).
        generation_config=generation_config,
        logits_processor=[StabilityCheck()],
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.eos_token_id,
    )

# Decode the full sequence (prompt + continuation), dropping special tokens.
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
