# from modelscope import AutoModelForCausalLM, AutoTokenizer
# import torch
#
# # 设置模型路径
# model_name = "/home/liuzhongzhong/data/save_modles"
#
# try:
#     # 加载tokenizer
#     tokenizer = AutoTokenizer.from_pretrained(model_name)
#
#     # 加载模型
#     model = AutoModelForCausalLM.from_pretrained(
#         model_name,
#         torch_dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,
#         device_map="auto"
#     )
#
#     print("模型加载成功!")
#
#     # 准备模型输入
#     prompt = "通道更新一下"
#     messages = [
#         {"role": "user", "content": prompt}
#     ]
#
#     # 应用聊天模板
#     text = tokenizer.apply_chat_template(
#         messages,
#         tokenize=False,
#         add_generation_prompt=True
#     )
#
#     print("生成的输入文本:", text)
#
#     # 对输入进行编码
#     model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
#
#     # 生成文本
#     generated_ids = model.generate(
#         **model_inputs,
#         max_new_tokens=100,
#         temperature=0.7,
#         do_sample=True,
#         pad_token_id=tokenizer.eos_token_id
#     )
#
#     # 解码生成的文本
#     generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
#     print("生成的完整响应:", generated_text)
#
#     # 提取assistant的回复部分
#     # 查找assistant开始标记后的内容
#     if "<|im_start|>assistant" in generated_text:
#         assistant_response = generated_text.split("<|im_start|>assistant")[-1]
#         if "<|im_end|>" in assistant_response:
#             assistant_response = assistant_response.split("<|im_end|>")[0]
#         print("助理回复:", assistant_response.strip())
#     else:
#         print("无法提取助理回复，显示完整响应:", generated_text)
#
# except Exception as e:
#     print(f"加载或运行模型时出错: {e}")
#     import traceback
#
#     traceback.print_exc()


# from modelscope import AutoModelForCausalLM, AutoTokenizer
#
# # model_name = "Qwen/Qwen3-0.6B"
# model_name = "/home/liuzhongzhong/data/save_modles"
#
#
# # load the tokenizer and the model
# tokenizer = AutoTokenizer.from_pretrained(model_name)
# model = AutoModelForCausalLM.from_pretrained(
#     model_name,
#     torch_dtype="auto",
#     device_map="auto"
# )
#
# # prepare the model input
# prompt = "通道更新一下."
# messages = [
#     {"role": "user", "content": prompt}
# ]
# text = tokenizer.apply_chat_template(
#     messages,
#     tokenize=False,
#     add_generation_prompt=True,
#     enable_thinking=True # Switches between thinking and non-thinking modes. Default is True.
# )
# model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
#
# # conduct text completion
# generated_ids = model.generate(
#     **model_inputs,
#     max_new_tokens=32768
# )
# output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()
#
# # parsing thinking content
# try:
#     # rindex finding 151668 (</think>)
#     index = len(output_ids) - output_ids[::-1].index(151668)
# except ValueError:
#     index = 0
#
# thinking_content = tokenizer.decode(output_ids[:index], skip_special_tokens=True).strip("\n")
# content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip("\n")
#
# # print("thinking content:", thinking_content)
# print("content:", content)


from modelscope import AutoModelForCausalLM, AutoTokenizer

# model_name = "Qwen/Qwen3-0.6B"
model_name = "/home/liuzhongzhong/data/save_modles"

# Token id of "</think>" in the Qwen3 vocabulary; used to split the model's
# chain-of-thought ("thinking") segment from the final answer.
THINK_END_TOKEN_ID = 151668


def _think_split_index(output_ids: list[int]) -> int:
    """Return the index just past the LAST </think> token in *output_ids*.

    Returns 0 when no </think> token is present, so the entire output is
    treated as final content and the thinking segment is empty.
    """
    try:
        # rindex-style right-to-left search via a reversed copy.
        return len(output_ids) - output_ids[::-1].index(THINK_END_TOKEN_ID)
    except ValueError:
        return 0


def main() -> None:
    """Load the local Qwen3 model, run one chat turn, and print the answer.

    Side effects: loads model weights from ``model_name`` onto the best
    available device and prints the generated content to stdout.
    """
    # Load the tokenizer and the model (dtype and device placement are
    # chosen automatically by the framework).
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype="auto",
        device_map="auto"
    )

    # Prepare the model input as a single-turn chat.
    prompt = "你叫啥？"
    messages = [
        {"role": "user", "content": prompt}
    ]
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=True  # Switches between thinking and non-thinking modes. Default is True.
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

    # Conduct text completion, then drop the echoed prompt tokens so only
    # the newly generated ids remain.
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=32768
    )
    output_ids = generated_ids[0][len(model_inputs.input_ids[0]):].tolist()

    # Separate the thinking segment from the final answer.
    index = _think_split_index(output_ids)
    thinking_content = tokenizer.decode(output_ids[:index], skip_special_tokens=True).strip("\n")
    content = tokenizer.decode(output_ids[index:], skip_special_tokens=True).strip("\n")

    # print("thinking content:", thinking_content)
    print("content:", content)


if __name__ == "__main__":
    main()