# Environment sanity check: report the installed PyTorch version and
# whether a CUDA-capable GPU is visible before attempting to load the model.
import torch 
print(torch.__version__)
print(torch.cuda.is_available())
from unsloth import FastLanguageModel
# Model-loading configuration.
max_seq_length = 2048  # hard cap on the token sequence length
dtype = None  # let the loader auto-select the weight dtype
load_in_4bit = False  # keep full-precision weights (no 4-bit quantization)
# Prefer the first CUDA GPU when one is present, otherwise fall back to CPU.
if torch.cuda.is_available():
    device = torch.device("cuda:0")
else:
    device = torch.device("cpu")
# NOTE: DeepSeek-R1-Distill-Llama-8B is better suited for English prompts.
# Load the local checkpoint and its tokenizer through Unsloth.
_load_kwargs = {
    "model_name": "./deepseek-r1:8b-unsloth",
    "max_seq_length": max_seq_length,
    "dtype": dtype,
    "load_in_4bit": load_in_4bit,
    "device_map": {"": device},  # place every parameter on the chosen device
}
model, tokenizer = FastLanguageModel.from_pretrained(**_load_kwargs)

print(model)
print(tokenizer)
# Switch Unsloth into its optimized inference mode (disables training paths).
FastLanguageModel.for_inference(model) 
# The question: a multiple-choice complex-number problem, written in LaTeX.
# FIX: "\\bar{z}" needs a doubled backslash — the original "\bar" embedded a
# literal backspace control character (\x08) into the prompt string.
question = "2. (5 分) 已知复数 $z=\\frac{\\sqrt{3}+i}{(1-\\sqrt{3} i)^{2}}, \\bar{z}$ 是 $z$ 的共轭复数, 则 $z\\cdot\\bar{z}=(\\quad)$\nA. $\\frac{1}{4}$\nB. $\\frac{1}{2}$\nC. 1\nD. 2\n"
# Tokenize the question into tensors and move them to the same device as the
# model. FIX: "cuda" was hard-coded here, which crashed on CPU-only machines
# despite the device fallback computed above.
inputs = tokenizer([question], return_tensors="pt").to(device)
print(inputs)
# Feed the tokenized prompt to the model and generate an answer.
outputs = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    max_new_tokens=1200,  # allow room for a long step-by-step answer
    use_cache=True,  # reuse the KV cache between decoding steps
)
# The output is also token ids; decode back to text. skip_special_tokens
# drops BOS/EOS/padding markers that would otherwise clutter the printout.
response = tokenizer.batch_decode(outputs, skip_special_tokens=True)
print(response[0])