


# 步骤1：安装必要库（如果未安装）
# pip install torch transformers accelerate bitsandbytes

from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

# Step 2: set the model identifier (change to match your actual model).
# Either a Hub model ID, e.g. "deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
# or a local directory containing a saved model, as used below.

MODEL_NAME = r"D:\share\python\python_net\deepseek\fine-tuned-qwen-1.5b"  # local path to the fine-tuned checkpoint


# Step 3: load the model and tokenizer
def load_model(model_name, device_map="auto", load_in_4bit=False):
    """Load a causal language model and its tokenizer.

    Args:
        model_name: Hub model ID or local checkpoint directory.
        device_map: device placement strategy passed to ``from_pretrained``
            ("auto" lets accelerate spread the model across devices).
        load_in_4bit: when True, load with 4-bit NF4 quantization
            (bitsandbytes) to reduce VRAM usage; otherwise load in fp16.

    Returns:
        A ``(model, tokenizer)`` tuple.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name)

    # Build the loading options once; the two branches only differ in
    # how precision is handled (4-bit quantization vs. plain fp16).
    model_kwargs = {
        "device_map": device_map,
        "trust_remote_code": True,  # in case the model ships custom code
    }
    if load_in_4bit:
        from transformers import BitsAndBytesConfig
        model_kwargs["quantization_config"] = BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type="nf4",
            bnb_4bit_compute_dtype=torch.float16,
        )
    else:
        model_kwargs["torch_dtype"] = torch.float16

    model = AutoModelForCausalLM.from_pretrained(model_name, **model_kwargs)
    return model, tokenizer


# Initialize the model at import time (module-level side effect):
# loads the checkpoint from MODEL_NAME with 4-bit quantization to save VRAM.
model, tokenizer = load_model(MODEL_NAME, load_in_4bit=True)  # 4-bit quantization to reduce GPU memory


# Step 4: create the generation function
def generate_response(prompt, max_new_tokens=4096):
    """Generate a completion for ``prompt`` and return only the new text.

    Bug fix: the original version decoded the *entire* output sequence
    (prompt + completion) even though its final comment promised to strip
    the input part. We now slice off the prompt tokens before decoding so
    only the model's generated continuation is returned.

    Args:
        prompt: full prompt string, already in the chat format the model
            expects (see the example under ``__main__``).
        max_new_tokens: upper bound on generated tokens.

    Returns:
        The generated text with special tokens removed.
    """
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)

    # Sampling configuration; eos is reused as pad because many causal LMs
    # (including Qwen-style models) define no dedicated pad token.
    generate_kwargs = {
        "input_ids": inputs.input_ids,
        "attention_mask": inputs.attention_mask,  # already on model.device
        "max_new_tokens": max_new_tokens,
        "do_sample": True,
        "top_p": 0.9,
        "temperature": 0.7,
        "repetition_penalty": 1.1,
        "eos_token_id": tokenizer.eos_token_id,
        "pad_token_id": tokenizer.eos_token_id,
    }

    outputs = model.generate(**generate_kwargs)

    # Strip the prompt tokens so only the newly generated content remains.
    prompt_len = inputs.input_ids.shape[-1]
    return tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)


# Step 5: usage example
if __name__ == "__main__":
    # Example conversation (adjust to the chat format your model requires).
    prompt = """<｜begin▁of▁sentence｜><｜User｜>一个圆的周长为1.26米，两只蚂蚁从一条直径的两端同时沿圆周出发相向爬行，爬行速度分别为5.5厘米/秒和3.5厘米/秒。已知它们先爬行1秒后同时掉头，爬行3秒后再同时掉头，爬行5秒后再同时掉头······且爬行时间为连续奇数，则它们在多少秒之后相遇？\n<｜Assistant｜>"""
    # prompt = """Python如何反转链表？"""

    try:
        reply = generate_response(prompt)
    except Exception as e:
        # Top-level boundary of the script: report the failure and exit cleanly.
        print(f"生成错误: {str(e)}")
    else:
        print("模型回复：\n", reply)