def run(params: list[str]):
    """Caption a single image with the OFA image-captioning model and print the result.

    The image is taken from CLI argument 1 (via ``apiBase.argv``), falling back to a
    local sample path. ``apiBase.close()`` is always called on exit, even on error.
    ``params`` is accepted for interface compatibility but not read here.
    """
    # Heavy third-party deps are imported lazily so importing this module stays cheap.
    from modelscope.pipelines import pipeline
    from modelscope.utils.constant import Tasks
    from modelscope.outputs import OutputKeys
    # NOTE: the OFA pipeline additionally requires `pip install open_clip_torch`
    from ApiBase import apiBase
    try:
        # Example remote input:
        # 'https://xingchen-data.oss-cn-zhangjiakou.aliyuncs.com/maas/image-captioning/donuts.jpg'
        image_source = apiBase.argv(1,"/data/bzmwork/softrobot/llm/codeqwen-7b/txt/ocr_recognition.jpg")

        captioner = pipeline(Tasks.image_captioning, model='damo/ofa_image-caption_coco_large_en', model_revision='v1.0.1')
        caption_output = captioner(image_source)
        # e.g. 'a bunch of donuts on a wooden board with popsicle sticks'
        print(caption_output[OutputKeys.CAPTION][0])
    finally:
        apiBase.close()