from unsloth import FastLanguageModel
import torch
# --- Model loading configuration ---
max_seq_length = 2048   # context window used for fine-tuning/inference
dtype = None            # None lets Unsloth auto-detect (bf16/fp16)
load_in_4bit = True     # load quantized weights to cut VRAM usage

# Collect the loader arguments in one place, then load the fine-tuned
# Qwen3-8B checkpoint together with its tokenizer.
_load_kwargs = dict(
    model_name="/mnt/codes/qwen3/fine-tuning/Qwen3-8B-finetuned-4bit",
    max_seq_length=max_seq_length,
    dtype=dtype,
    load_in_4bit=load_in_4bit,
)
model, tokenizer = FastLanguageModel.from_pretrained(**_load_kwargs)

# Build a single-turn chat prompt (a Chinese criminal-law question about
# the crime and sentencing of a separatist ringleader).
messages = [
    {"role" : "user", "content" : "问题：某分裂组织在边境地区策划武装割据，并与境外势力秘密勾结，其首要分子甲在实施过程中被抓获。请问甲的行为构成何罪？应当如何量刑？"}
]
# Render the conversation with the model's chat template into a plain
# string (tokenize=False) so it can be tokenized separately below.
text = tokenizer.apply_chat_template(
    messages,
    tokenize = False,
    add_generation_prompt = True, # Must add for generation
    enable_thinking = True, # Keep the model's reasoning/"thinking" trace ENABLED (original comment said "Disable" — it was wrong)
)

from transformers import TextStreamer

# Generate a response, streaming tokens to stdout as they are produced.
# Fix: send inputs to `model.device` instead of a hard-coded "cuda" so the
# script also works when the model is placed on CPU or a non-default GPU.
outputs = model.generate(
    **tokenizer(text, return_tensors = "pt").to(model.device),
    max_new_tokens = 2048, # Increase for longer outputs!
    temperature = 0.6, top_p = 0.95, top_k = 20, # Recommended sampling params for Qwen3 thinking mode
    streamer = TextStreamer(tokenizer, skip_prompt = True), # stream completion only, skip the echoed prompt
)

# Decode the full sequences (prompt + completion, special tokens included).
response = tokenizer.batch_decode(outputs)

# NOTE: the streamer already printed the completion live; this prints the
# full decoded text (including the prompt) once more, as the original did.
print(response[0])