from modelscope import AutoModelForCausalLM, AutoTokenizer,BitsAndBytesConfig
import torch

model_path = 'Qwen/Qwen3-8B'
tokenizer = AutoTokenizer.from_pretrained(model_path, use_fast=False)
print(tokenizer.chat_template)

# 4-bit NF4 quantization with nested (double) quantization; matmuls run in bfloat16.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# BUG FIX: calling .to("cuda") on a bitsandbytes-quantized model raises
# ValueError ("`.to` is not supported for 4-bit ... models"). Place the
# weights on the GPU via device_map instead.
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    low_cpu_mem_usage=True,
    quantization_config=bnb_config,
    device_map="cuda",
)

# NOTE(review): optimizing the raw parameters of a 4-bit model directly is
# unusual — quantized weights are not meant to receive gradient updates;
# typically adapters (LoRA/PEFT) are trained instead. Kept as-is for the demo.
optimizer = torch.optim.AdamW(model.parameters())

dialog = [{"role": "system", "content": "You are a helpful assistant."},
          {"role": "user", "content": "天空为什么是蓝色的？"},
          {"role": "assistant", "content": "这是由于光的散射引起的。"}]

# Tokenize the chat and move all tensors to the model's device.
# ("batch" instead of "input" to avoid shadowing the builtin.)
encoded = tokenizer.apply_chat_template(dialog, return_tensors="pt", return_dict=True)
print(tokenizer.decode(encoded["input_ids"][0], skip_special_tokens=False))
batch = {k: v.to(model.device) for k, v in encoded.items()}

# For causal-LM training, labels mirror input_ids; the model shifts them
# internally when computing the loss.
batch["labels"] = batch["input_ids"].clone()
output = model(**batch)
print(f'output: {output}')

# BUG FIX: exclude `labels` from generate() — it would be forwarded to every
# decoding step's forward pass, and after the first cached step the
# labels/logits shapes mismatch and the internal loss computation crashes.
gen_inputs = {k: v for k, v in batch.items() if k != "labels"}
output2 = model.generate(**gen_inputs)
print(f'output2: {tokenizer.batch_decode(output2, skip_special_tokens=True)}')

# Get the model's loss and take one optimizer step.
loss = output.loss
loss.backward()
optimizer.step()
optimizer.zero_grad()

# Save the (updated) model.
model.save_pretrained("out")