

QWENVL='/mnt/nas/shengjie/huggingface_model_local/qwenvl'


def get_qwenvl():
    """Load the Qwen-VL tokenizer and model from the local checkpoint.

    Returns:
        tuple: ``(tokenizer, model)`` where the model is placed on CUDA,
        loaded in bf16, and switched to eval mode.
    """
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    # Fixed seed so generation is reproducible across runs.
    torch.manual_seed(1234)

    tokenizer = AutoTokenizer.from_pretrained(QWENVL, trust_remote_code=True)

    # trust_remote_code is required because Qwen-VL ships custom model code;
    # bf16 keeps memory usage down compared to fp32.
    qwenvl_model = AutoModelForCausalLM.from_pretrained(
        QWENVL,
        device_map="cuda",
        trust_remote_code=True,
        bf16=True,
    ).eval()

    return tokenizer, qwenvl_model

def get_qwenvl_ans(tokenizer, qwenvl_model,
                    image_path='https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen-VL/assets/demo.jpeg', 
                    question='Generate the caption in English with grounding:'):
    """Run one image+question query through Qwen-VL and return the raw response.

    Args:
        tokenizer: Qwen-VL tokenizer (must provide ``from_list_format`` and
            ``draw_bbox_on_latest_picture`` — these come with trust_remote_code).
        qwenvl_model: loaded Qwen-VL model.
        image_path: URL or local path of the image to describe.
        question: text prompt appended after the image.

    Returns:
        str: the decoded model output (special tokens kept, so any
        ``<ref>``/``<box>`` grounding markup is preserved).

    Side effects:
        Prints the response; if the response contains bounding boxes, saves
        a visualization to ``test_qwen.jpg``.
    """
    # Specify hyperparameters for generation (No need to do this if you are using transformers>=4.32.0)
    # model.generation_config = GenerationConfig.from_pretrained("Qwen/Qwen-VL", trust_remote_code=True)

    query = tokenizer.from_list_format([
        {'image': image_path},
        {'text': question},
    ])
    inputs = tokenizer(query, return_tensors='pt')
    inputs = inputs.to(qwenvl_model.device)
    pred = qwenvl_model.generate(**inputs)
    # skip_special_tokens=False keeps the <ref>/<box> grounding tags so the
    # bbox drawing helper below can parse them.
    response = tokenizer.decode(pred.cpu()[0], skip_special_tokens=False)
    print(response)
    # Example output:
    # <img>...</img>Generate the caption in English with grounding:<ref> Woman</ref><box>(451,379),(731,806)</box> ...<|endoftext|>
    image = tokenizer.draw_bbox_on_latest_picture(response)
    if image is not None:
        # BUG FIX: the original called qwenvl_model.save(...) — the PIL image
        # returned by draw_bbox_on_latest_picture is what must be saved.
        image.save('test_qwen.jpg')
    else:
        print("no box")

    return response


def main():
    """Load a JSON template, build a structured-output prompt, and query Qwen-VL.

    Reads ``demo_qwenvl_clothing.json``, embeds its raw text in a prompt that
    instructs the model to answer with JSON matching that template, then runs
    the query against a local clothing image and prints the answer.
    """
    import json

    template_json_path = 'demo_qwenvl_clothing.json'
    # Read the file once; keep both the raw text (for the prompt) and the
    # parsed dict (for per-key experimentation below). The original read the
    # file twice.
    with open(template_json_path, 'r', encoding='utf-8') as f:
        test_template_json_str = f.read()
    template_json_dict = json.loads(test_template_json_str)

    # Convert each key/value pair into its own JSON-string fragment
    # (used for testing single-field prompts; see the commented line below).
    items_json_str_list = []
    if isinstance(template_json_dict, dict):
        for k, v in template_json_dict.items():
            # Values may be dicts/lists, so serialize them; fall back to
            # str() for anything json can't handle.
            try:
                v_str = json.dumps(v, ensure_ascii=False)
            except Exception:
                v_str = str(v)
            kv_str = f'"{k}": {v_str}'
            items_json_str_list.append(kv_str)

    # test_template_json_str = items_json_str_list[0]
    image_path = 'compare_collar/c-3.jpg'

    question = f'''
    请严格按照以下JSON模板结构返回数据，不要添加任何额外内容：

    模板结构：
    {test_template_json_str}

    要求：
    1. 只返回纯JSON数据，不要有解释性文字
    2. 确保所有字段名称与模板完全一致
    3. 如果某些信息无法获取，对应字段值为null
    4. 不要包含```json```等代码块标记

    现在处理数据并返回JSON：
    '''
    # Alternative English prompt kept for reference:
    # question = f"First, read this clothing image. \
    #             Then, answer according to the clothing JSON template provided below: {test_template_json_str}. \
    #             Finally, generate ONLY the JSON data content. Important: generate ONLY the JSON data content!"

    tokenizer, qwenvl_model = get_qwenvl()
    ans = get_qwenvl_ans(tokenizer, qwenvl_model,
                        image_path, question)
    print(ans)

if __name__ == '__main__':
    import argparse
    import os

    # CLI flags: pick the visible GPU and (unused here) a service port.
    # parse_known_args lets extra flags pass through without erroring.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-c', '--cuda', type=str, default='2', help='CUDA device id')
    arg_parser.add_argument('-p', '--port', type=int, default=20022, help='port')
    args, unknown = arg_parser.parse_known_args()

    # Must be set before the model is loaded so device_map="cuda" targets
    # the requested card.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.cuda

    main()