
# Local filesystem path to a Qwen3-8B checkpoint in HuggingFace format.
model_name = "/mnt/nas/shengjie/huggingface_model_local/Qwen3-8B"


def get_model():
    """Load the Qwen tokenizer and causal-LM model from ``model_name``.

    Imports are kept function-local so that merely importing this module
    does not pull in torch/transformers.

    Returns:
        tuple: ``(tokenizer, model)`` — the model is loaded in bfloat16
        and sharded across available devices via ``device_map="auto"``.
    """
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained(model_name)
    lm = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map="auto",
        torch_dtype=torch.bfloat16,
    )
    return tok, lm


def test():
    """End-to-end demo: convert a search sentence into the JSON template via Qwen.

    Parses ``--cuda``/``--port`` from the CLI, restricts visible GPUs, reads the
    JSON template file, builds the prompt, runs the model, and prints the answer.
    """
    import argparse
    import os

    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--cuda', type=str, default='2', help='CUDA device id')
    # NOTE(review): --port is parsed but never used in this script — confirm it
    # is needed (perhaps by a serving wrapper) or drop it.
    parser.add_argument('-p', '--port', type=int, default=20022, help='port')
    args, unknown = parser.parse_known_args()
    # Must be set before torch initializes CUDA; torch is imported lazily
    # inside get_model(), so setting it here is early enough.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda

    template_json_path = 'demo_qwenvl_clothing.json'
    # Read the JSON template that serves as the output schema in the prompt.
    with open(template_json_path, 'r', encoding='utf-8') as f:
        template_json_str = f.read()

    def build_question(search_txt):
        """Compose the prompt: template + search sentence + formatting rules.

        Fixes vs. the original: the prompt no longer embeds accidental
        indentation from backslash line continuations, and the duplicated
        character typo "转换为为" is corrected to "转换为".
        """
        return (
            f"{template_json_str} 检索语句：{search_txt} ."
            "将检索语句转换为上面的json模板格式并输出，不要加入markdown语法，只是按这个输出即可。"
            "注意：json模板的内容只是参考，不要作为默认值！检索语句中没有提到的内容全部填充为空，不要臆想！"
            "关于count的部分，没有指定数量，则填写-1."
        )

    # Example query: "black coat for spring/autumn, two pockets".
    search_txt = '春秋两季黑色大衣，两个口袋'
    question = build_question(search_txt)

    tokenizer, qwen_model = get_model()

    ans = get_ans(tokenizer, qwen_model, question)

    print(ans)


def get_ans(tokenizer, qwen_model, question):
    """Run one chat turn through the model and return the answer text.

    Applies the chat template (with thinking disabled), generates up to 2048
    new tokens, splits the output at the last ``</think>`` token if present,
    prints both parts, and returns the post-thinking content.

    Args:
        tokenizer: HuggingFace tokenizer matching ``qwen_model``.
        qwen_model: a causal LM with ``.generate`` and ``.device``.
        question: the user prompt text.

    Returns:
        str: generated content after any thinking section, newline-stripped.
    """
    THINK_END_ID = 151668  # token id of </think> in the Qwen3 vocabulary

    chat = [{"role": "user", "content": question}]
    prompt = tokenizer.apply_chat_template(
        chat,
        tokenize=False,
        add_generation_prompt=True,
        enable_thinking=False,  # switches between thinking and non-thinking modes
    )
    inputs = tokenizer([prompt], return_tensors="pt").to(qwen_model.device)

    # Generate, then keep only the newly produced tokens (drop the prompt).
    generated = qwen_model.generate(**inputs, max_new_tokens=2048)
    prompt_len = len(inputs.input_ids[0])
    new_tokens = generated[0][prompt_len:].tolist()

    # Split at the LAST </think> token; if absent, everything is content.
    if THINK_END_ID in new_tokens:
        split_at = len(new_tokens) - new_tokens[::-1].index(THINK_END_ID)
    else:
        split_at = 0

    thinking_content = tokenizer.decode(new_tokens[:split_at], skip_special_tokens=True).strip("\n")
    content = tokenizer.decode(new_tokens[split_at:], skip_special_tokens=True).strip("\n")

    print("thinking content:", thinking_content)
    print("content:", content)

    return content


if __name__ == '__main__':
    # Script entry point: run the end-to-end demo.
    test()