def run(params: list[str]):
    """Download the Qwen-VL-Chat multimodal model and answer a question about an image.

    Reads the image path/URL and the text prompt via ``apiBase.argv`` (positional
    arguments with defaults), runs one dialogue turn through the model, and prints
    the response to stdout. Always closes the apiBase connection on exit.

    Args:
        params: Unused here; kept for the caller's interface. The actual
            arguments are read through ``apiBase.argv``.
    """
    # NOTE(review): removed a leftover BLIP2-Chinese snapshot download plus
    # os.chdir()/sys.path mutation — dead code from a commented-out
    # ms_wrapper experiment that also shadowed this snapshot_download import.
    import torch
    from modelscope import (
        snapshot_download, AutoModelForCausalLM, AutoTokenizer, GenerationConfig
    )

    from ApiBase import apiBase

    model_id = 'qwen/Qwen-VL-Chat'
    revision = 'v1.1.0'
    model_dir = snapshot_download(model_id, revision=revision)
    torch.manual_seed(1234)  # fixed seed for reproducible generation

    try:
        image_url = apiBase.argv(1, "/data/bzmwork/softrobot/llm/codeqwen-7b/txt/ocr_recognition.jpg")
        prompt = apiBase.argv(2, "图片里面有什么")

        tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)
        model = AutoModelForCausalLM.from_pretrained(
            model_dir, device_map="auto", trust_remote_code=True, fp16=True
        ).eval()
        # Generation hyper-parameters (max length, top_p, ...) come from the
        # generation config bundled with the downloaded model.
        model.generation_config = GenerationConfig.from_pretrained(model_dir, trust_remote_code=True)

        # First dialogue turn: one image plus one text question.
        query = tokenizer.from_list_format([
            {'image': image_url},
            {'text': prompt},
        ])
        response, _history = model.chat(tokenizer, query=query, history=None)
        print(response)
    finally:
        apiBase.close()