from lmdeploy import pipeline, TurbomindEngineConfig, PytorchEngineConfig, GenerationConfig
from lmdeploy.vl import load_image
from PIL import Image

# Local model checkpoints tried so far — uncomment one line to switch.
# model = 'E:/LLM/GLM/glm-edge-v-2b'
model = 'E:/LLM/OpenGVLab/InternVL2_5-2B'
# model = '/mnt/e/LLM/Qwen/Qwen2-VL-2B-Instruct'      # requires triton
# model = 'E:/LLM/openbmb/MiniCPM-V-2_6-int4'

# Single-turn VQA demo: ask one question about one local image and print the answer.
image = load_image('Small_MultiModalLM/InternVL/mouse1.jpg')
pipe = pipeline(model)
prompt = '图片里有什么'
response = pipe((prompt, image))
print(response.text)

# gen_config = GenerationConfig(top_k=40, top_p=0.8, temperature=0.8)


# if 'Qwen' in model:
#     messages = [
#         {
#             "role": "user",
#             "content": [
#                 {
#                     "type": "image",
#                     "image": "Small_MultiModalLM/InternVL/mouse1.jpg",
#                 },
#                 {"type": "text", "text": "描述图片。"},
#             ],
#         }
#     ]
# else:
#     # image = Image.open('xx.jpg').convert('RGB')
#     question = 'What is in the image?'
#     messages = [{'role': 'user', 'content': [image, question]}]

# sess = pipe.chat(messages=messages, gen_config=gen_config)
# print(sess.response.text)


# messages.append({
#     "role": "user",
#     "content": [
#         {"type": "text", "text": "形容图片里的老鼠?"},
#     ]
# })
# sess = pipe.chat(messages=messages, session=sess, gen_config=gen_config)
# print(sess.response.text)