package com.ruoyi.common.utils;

import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import com.alibaba.dashscope.utils.JsonUtils;
import org.noear.snack.ONode;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.*;

public class QwUtil {

    private static final Logger logger = LoggerFactory.getLogger(QwUtil.class);

    /** Default vision-language model id shared by the local-file and video calls. */
    private static final String MODEL_QWEN_VL_MAX_LATEST = "qwen-vl-max-latest";

    /**
     * Dispatches a multimodal analysis request according to {@code type}.
     *
     * @param path        single image/video path or URL
     * @param pathList    optional list of local image paths; when non-empty and
     *                    {@code type == 2} the batch variant is used instead of {@code path}
     * @param description prompt text describing what the model should extract
     * @param type        1 = online image, 2 = local image(s), 3 = online video, 4 = local video
     * @return the raw JSON result from the model, or an empty string for an unknown type
     * @throws NoApiKeyException  if no DashScope API key is configured
     * @throws UploadFileException if a local file cannot be uploaded
     */
    public static String conversationCallList(String path, List<String> pathList, String description, int type)
            throws NoApiKeyException, UploadFileException {
        switch (type) {
            case 1:
                return multiModalConversationCall(path, description);
            case 2:
                // Prefer the batch variant when several local images are supplied.
                if (pathList != null && !pathList.isEmpty()) {
                    return callWithLocalFileList(pathList, description);
                }
                return callWithLocalFile(path, description);
            case 3:
                return multiModalVedioConversationCall(path, description);
            case 4:
                return callWithVideoLocalFile(path, description);
            default:
                // Unknown type: preserve the original behavior of returning "".
                return "";
        }
    }

    /**
     * Convenience overload of {@link #conversationCallList(String, List, String, int)}
     * for a single path.
     */
    public static String conversationCall(String path, String description, int type)
            throws NoApiKeyException, UploadFileException {
        return conversationCallList(path, null, description, type);
    }

    /**
     * Analyses an online (HTTP-accessible) image with the qwen-vl-plus model.
     *
     * @param imageUrl      publicly reachable image URL
     * @param imageDescribe prompt describing what to extract from the image
     * @return the raw JSON-serialized {@link MultiModalConversationResult}
     */
    public static String multiModalConversationCall(String imageUrl, String imageDescribe)
            throws ApiException, NoApiKeyException, UploadFileException {
        long startTime = System.currentTimeMillis();
        MultiModalConversation conv = new MultiModalConversation();
        MultiModalMessage userMessage = MultiModalMessage.builder().role(Role.USER.getValue())
                .content(Arrays.asList(
                        Collections.singletonMap("image", imageUrl),
                        Collections.singletonMap("text", imageDescribe)))
                .build();
        MultiModalConversationParam param = MultiModalConversationParam.builder()
                .model(MultiModalConversation.Models.QWEN_VL_PLUS)
                .message(userMessage)
                .build();
        MultiModalConversationResult result = conv.call(param);
        logger.info("图片解析时间:{}秒", (System.currentTimeMillis() - startTime) / 1000);
        return JsonUtils.toJson(result);
    }

    /**
     * Analyses an online (HTTP-accessible) video with the qwen-vl-max-latest model.
     *
     * @param videoUrl    publicly reachable video URL
     * @param description prompt describing what to extract from the video
     * @return the raw JSON-serialized {@link MultiModalConversationResult}
     */
    public static String multiModalVedioConversationCall(String videoUrl, String description)
            throws ApiException, NoApiKeyException, UploadFileException {
        long startTime = System.currentTimeMillis();
        MultiModalConversation conv = new MultiModalConversation();
        MultiModalMessage userMessage = MultiModalMessage.builder().role(Role.USER.getValue())
                .content(Arrays.asList(
                        Collections.singletonMap("video", videoUrl),
                        Collections.singletonMap("text", description)))
                .build();
        MultiModalConversationParam param = MultiModalConversationParam.builder()
                .model(MODEL_QWEN_VL_MAX_LATEST)
                .message(userMessage)
                .build();
        MultiModalConversationResult result = conv.call(param);
        logger.info("视频解析时间:{}秒", (System.currentTimeMillis() - startTime) / 1000);
        return JsonUtils.toJson(result);
    }

    /**
     * Analyses a single local image file (uploaded via a {@code file:///} URI).
     *
     * @param localPath     absolute local path; Windows backslashes are normalized
     * @param imageDescribe prompt describing what to extract from the image
     * @return the raw JSON-serialized {@link MultiModalConversationResult}
     */
    public static String callWithLocalFile(String localPath, String imageDescribe)
            throws ApiException, NoApiKeyException, UploadFileException {
        long startTime = System.currentTimeMillis();
        // The SDK expects a file:/// URI with forward slashes.
        String filePath = "file:///" + localPath.replace("\\", "/");
        MultiModalConversation conv = new MultiModalConversation();
        Map<String, Object> imageItem = new HashMap<>();
        imageItem.put("image", filePath);
        Map<String, Object> textItem = new HashMap<>();
        textItem.put("text", imageDescribe);
        MultiModalMessage userMessage = MultiModalMessage.builder().role(Role.USER.getValue())
                .content(Arrays.asList(imageItem, textItem))
                .build();
        MultiModalConversationParam param = MultiModalConversationParam.builder()
                .model(MODEL_QWEN_VL_MAX_LATEST)
                .message(userMessage)
                .build();
        MultiModalConversationResult result = conv.call(param);
        logger.info("图片解析结果:{}", result);
        logger.info("图片解析时间:{}秒", (System.currentTimeMillis() - startTime) / 1000);
        return JsonUtils.toJson(result);
    }

    /**
     * Analyses several local image files in a single request.
     *
     * @param localPaths    absolute local paths; Windows backslashes are normalized
     * @param imageDescribe prompt appended after the images
     * @return the raw JSON-serialized {@link MultiModalConversationResult}
     */
    public static String callWithLocalFileList(List<String> localPaths, String imageDescribe)
            throws ApiException, NoApiKeyException, UploadFileException {
        List<Map<String, Object>> content = new ArrayList<>(localPaths.size() + 1);
        for (String localPath : localPaths) {
            String filePath = "file:///" + localPath.replace("\\", "/");
            // BUG FIX: the original called contains(...) instead of add(...), so the
            // message content was always empty and nothing was sent to the model.
            content.add(new HashMap<String, Object>() {{ put("image", filePath); }});
        }
        content.add(new HashMap<String, Object>() {{ put("text", imageDescribe); }});
        long startTime = System.currentTimeMillis();

        MultiModalConversation conv = new MultiModalConversation();
        MultiModalMessage userMessage = MultiModalMessage.builder().role(Role.USER.getValue())
                .content(content)
                .build();
        MultiModalConversationParam param = MultiModalConversationParam.builder()
                .model(MODEL_QWEN_VL_MAX_LATEST)
                .message(userMessage)
                .build();
        MultiModalConversationResult result = conv.call(param);
        logger.info("图片解析结果:{}", result);
        logger.info("图片解析时间:{}秒", (System.currentTimeMillis() - startTime) / 1000);
        return JsonUtils.toJson(result);
    }

    /**
     * Analyses a local video file. The caller's prompt is sent as the SYSTEM
     * message; frames are sampled at 2 fps.
     *
     * @param localPath     absolute local video path; Windows backslashes are normalized
     * @param imageDescribe prompt sent as the system message
     * @return the raw JSON-serialized {@link MultiModalConversationResult}
     */
    public static String callWithVideoLocalFile(String localPath, String imageDescribe)
            throws ApiException, NoApiKeyException, UploadFileException {
        long startTime = System.currentTimeMillis();
        String filePath = "file:///" + localPath.replace("\\", "/");
        MultiModalConversation conv = new MultiModalConversation();
        MultiModalMessage systemMessage = MultiModalMessage.builder().role(Role.SYSTEM.getValue())
                .content(Arrays.asList(Collections.singletonMap("text", imageDescribe)))
                .build();
        Map<String, Object> videoItem = new HashMap<>();
        videoItem.put("video", filePath);
        // fps controls frame sampling: one frame is extracted every 1/fps seconds.
        videoItem.put("fps", 2);
        Map<String, Object> textItem = new HashMap<>();
        // NOTE(review): this user prompt is hard-coded (apparently left over from the
        // SDK sample); the real instruction travels in the system message above —
        // confirm this is intended rather than using imageDescribe here.
        textItem.put("text", "这段视频描绘的是什么景象？");
        MultiModalMessage userMessage = MultiModalMessage.builder().role(Role.USER.getValue())
                .content(Arrays.asList(videoItem, textItem))
                .build();
        MultiModalConversationParam param = MultiModalConversationParam.builder()
                // If the environment variable is not configured, replace this line
                // with your DashScope key: .apiKey("sk-xxx")
                .apiKey(System.getenv("DASHSCOPE_API_KEY"))
                .model(MODEL_QWEN_VL_MAX_LATEST)
                .messages(Arrays.asList(systemMessage, userMessage))
                .build();
        MultiModalConversationResult result = conv.call(param);
        logger.info("视频解析结果:{}", result);
        logger.info("视频解析时间:{}秒", (System.currentTimeMillis() - startTime) / 1000);
        return JsonUtils.toJson(result);
    }

    /**
     * Manual smoke test against a developer-local video file; requires the
     * DASHSCOPE_API_KEY environment variable and network access.
     */
    public static void main(String[] args) throws NoApiKeyException, UploadFileException {
        callWithVideoLocalFile("C:\\Users\\lenovo\\Desktop\\返修\\sample20_merged_group_shuffled\\镜头距离太近\\狗\\宠物喝水\\B01319.mp4","视频中的什么颜色什么品种的动物在什么时间什么光线什么场景中做什么，以一句话描述，其中时间只有白天和夜间，光线只有自然光和灯光示例：白天自然光，一只黑白相间的阿拉斯加犬站在客厅的餐桌旁正在吃菜。");
    }
}
