package com.barry.io.utils;

import java.util.*;

import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import lombok.extern.slf4j.Slf4j;

/**
 * Multi-image AI diagnosis via the Qwen-VL-Max model, which has the strongest
 * visual-understanding capability of the Tongyi Qianwen VL family.
 * <p>
 * Reference: https://help.aliyun.com/zh/model-studio/vision/#14d646f5a0owq
 * <p>
 * Qwen-VL models accept multiple images in a single request for combined analysis;
 * the total token count of all images must fit within the model's maximum input.
 * See the official docs for the per-model image-count limit.
 * <p>
 * NOTE: to use local images instead of URLs, pass a {@code file://} URI
 * (on Windows: {@code file:///} + path) as an entry of {@code imageUrls}.
 */
@Slf4j
public class MultipleImagesDiagnosis {

    /** Default image URLs used by the no-arg demo entry point (unchanged from the original sample). */
    private static final List<String> DEFAULT_IMAGE_URLS = Arrays.asList(
            "https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241022/emyrja/dog_and_girl.jpeg",
            "https://dashscope.oss-cn-beijing.aliyuncs.com/images/tiger.png",
            "https://help-static-aliyun-doc.aliyuncs.com/file-manage-files/zh-CN/20241108/hbygyo/rabbit.jpg");

    /** Default TCM-diagnosis prompt (kept byte-identical to the original). */
    private static final String DEFAULT_PROMPT =
            "根据舌面进行中医AI诊断，出一个JSON格式报告，报告维度有体质名称，中医证型，健康分值，体质分析，典型特征，风险预警，异常分析，调理建议等, " +
            "请把接口返回的MultiModalConversationResult中的MultiModalMessage的内容text内容以JSON格式返回。";

    /** Utility class — static methods only; prevent instantiation. */
    private MultipleImagesDiagnosis() {
    }

    /**
     * Runs the multi-image diagnosis call with the original sample images and prompt.
     * Behavior is identical to the original no-arg method; it now delegates to the
     * parameterized overload.
     *
     * @throws ApiException        on a DashScope API error
     * @throws NoApiKeyException   when no API key is configured
     * @throws UploadFileException when a local file upload fails
     */
    public static void simpleMultiModalConversationCall()
            throws ApiException, NoApiKeyException, UploadFileException {
        simpleMultiModalConversationCall(DEFAULT_IMAGE_URLS, DEFAULT_PROMPT);
    }

    /**
     * Sends one multimodal request containing the given images plus a text prompt,
     * and logs the model's first text answer.
     *
     * @param imageUrls image URLs (or {@code file://} URIs) to analyze together;
     *                  must respect the model's image-count/token limits
     * @param prompt    the text instruction appended after the images
     * @throws ApiException        on a DashScope API error
     * @throws NoApiKeyException   when no API key is configured
     * @throws UploadFileException when a local file upload fails
     */
    public static void simpleMultiModalConversationCall(List<String> imageUrls, String prompt)
            throws ApiException, NoApiKeyException, UploadFileException {
        Objects.requireNonNull(imageUrls, "imageUrls");
        Objects.requireNonNull(prompt, "prompt");

        MultiModalConversation conv = new MultiModalConversation();

        // Build the user content: one {"image": url} entry per image, then the text prompt.
        List<Map<String, Object>> content = new ArrayList<>(imageUrls.size() + 1);
        for (String url : imageUrls) {
            content.add(Collections.singletonMap("image", url));
        }
        content.add(Collections.singletonMap("text", prompt));

        MultiModalMessage systemMessage = MultiModalMessage.builder().role(Role.SYSTEM.getValue())
                .content(Arrays.asList(
                        Collections.singletonMap("text", "You are a helpful assistant."))).build();
        MultiModalMessage userMessage = MultiModalMessage.builder().role(Role.USER.getValue())
                .content(content).build();
        MultiModalConversationParam param = MultiModalConversationParam.builder()
                .apiKey(Const.ALI_BAI_LIAN_API_KEY)
                // qwen-vl-max-latest by default; swap the model name as needed.
                // Model list: https://help.aliyun.com/zh/model-studio/models
                .model(Const.ALI_BAI_LIAN_MODEL_VL_MAX)
                .messages(Arrays.asList(systemMessage, userMessage))
                .build();
        MultiModalConversationResult result = conv.call(param);
        log.info("AI大模型返回值为: {}", firstText(result));
    }

    /**
     * Extracts the first text fragment from the model response.
     * Isolated here so a malformed/empty response yields a readable {@code null}
     * in the log instead of an NPE deep inside a call chain.
     */
    private static Object firstText(MultiModalConversationResult result) {
        if (result == null
                || result.getOutput() == null
                || result.getOutput().getChoices() == null
                || result.getOutput().getChoices().isEmpty()) {
            return null;
        }
        List<Map<String, Object>> parts =
                result.getOutput().getChoices().get(0).getMessage().getContent();
        return (parts == null || parts.isEmpty()) ? null : parts.get(0).get("text");
    }

    public static void main(String[] args) {
        try {
            simpleMultiModalConversationCall();
        } catch (ApiException | NoApiKeyException | UploadFileException e) {
            // Log message AND full stack trace (cause preserved).
            log.error("解析报错: {} ", e.getMessage(), e);
        }
        // The SDK may leave non-daemon threads alive; exit explicitly so the JVM terminates.
        System.exit(0);
    }
}