import torch
from unsloth import FastLanguageModel
from peft import PeftModel

# Local path where the model and tokenizer are saved.
model_path = "/app/code/deepseek-gaokao/DeepSeek-R1-Distill-Llama-8B"

# Try loading the base model and tokenizer through unsloth first. Unsloth
# rejects repos that contain both a LoRA adapter and base-model weights, so
# fall back to a manual transformers + PEFT load in that specific case.
try:
    model, tokenizer = FastLanguageModel.from_pretrained(
        model_name=model_path,
        max_seq_length=2048,  # context window for tokenization/inference
        dtype=None,           # let unsloth auto-select the best dtype
        load_in_4bit=True,    # 4-bit quantization to reduce VRAM usage
    )
except RuntimeError as e:
    if "Your repo has a LoRA adapter and a base model" in str(e):
        # Manually load the base model, then attach the LoRA adapter on top.
        from transformers import AutoModelForCausalLM, AutoTokenizer
        base_model = AutoModelForCausalLM.from_pretrained(
            model_path,
            load_in_8bit=False,  # adjust if 8-bit quantization is desired
            torch_dtype=torch.float16,
            device_map="auto",   # spread layers across available devices
        )
        tokenizer = AutoTokenizer.from_pretrained(model_path)
        model = PeftModel.from_pretrained(base_model, model_path)
    else:
        # Bare `raise` re-raises the active exception with its original
        # traceback and context intact (idiomatic; `raise e` is redundant).
        raise

# Prompt template in the Alpaca-style instruction format. The two `{}`
# placeholders are filled by `prompt_style.format(question, "")`: the first
# receives the question text, the second seeds the model's chain-of-thought
# after the opening `<think>` tag (left empty so the model generates it).
# NOTE: the literal text below is part of the prompt the model sees —
# do not reformat or translate it.
prompt_style = """Below is an instruction that describes a task, paired with an input that provides further context.
Write a response that appropriately completes the request.
Before answering, think carefully about the question and create a step-by-step chain of thoughts to ensure a logical and accurate response.

### Instruction:
You are a medical expert with advanced knowledge in clinical reasoning, diagnostics, and treatment planning.
Please answer the following medical question.

### Question:
{}

### Response:
<think>{}"""

# The concrete medical question to ask the model (describes a case of
# presumed subacute bacterial endocarditis; the script only forwards it).
question = "A 59-year-old man presents with a fever, chills, night sweats, and generalized fatigue, and is found to have a 12 mm vegetation on the aortic valve. Blood cultures indicate gram-positive, catalase-negative, gamma-hemolytic cocci in chains that do not grow in a 6.5% NaCl medium. What is the most likely predisposing factor for this patient's condition?"

# Switch the model into unsloth's optimized inference mode.
FastLanguageModel.for_inference(model)

# Tokenize the filled-in prompt and move the tensors to the model's own
# device. Hard-coding "cuda" here would crash when the fallback load path
# placed the model on CPU (or sharded it via device_map="auto").
inputs = tokenizer([prompt_style.format(question, "")], return_tensors="pt").to(model.device)

# Generate the model's answer (up to 1200 new tokens beyond the prompt).
outputs = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    max_new_tokens=1200,
    use_cache=True,  # reuse the KV cache between decoding steps
)

# Decode token ids back to text (prompt + generation, special tokens kept).
response = tokenizer.batch_decode(outputs)

# Print the full raw model output.
print("模型输出:")
print(response[0])

# Extract everything after the "### Response:" marker. Split at most once so
# that, if the model emits the marker string again inside its answer, the
# tail of the generation is not silently dropped.
parts = response[0].split("### Response:", 1)
if len(parts) > 1:
    print("\n响应内容:")
    print(parts[1])
else:
    print("未找到 '### Response:' 标记")