package com.zhentao.PollutionPredictio.config;

import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.exception.ApiException;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import org.springframework.stereotype.Service;

import java.util.Arrays;
import java.util.Collections;

@Service
public class ImgAndTextAi {

    /** Lightweight text-only model used by {@link #fastTextAnalysis(String)}. */
    private static final String TEXT_MODEL = "qwen-plus";

    /** Vision-language model used for image+text by {@link #simpleMultiModalConversationCall(String, String)}. */
    private static final String MULTIMODAL_MODEL = "qwen-vl-plus";

    /** Maximum attempts for the retried text analysis call. */
    private static final int MAX_RETRIES = 3;

    /** Base back-off between retries; multiplied by the attempt number. */
    private static final long RETRY_BACKOFF_MS = 2000L;

    /**
     * SECURITY NOTE(review): the DashScope API key was previously hard-coded (twice)
     * in this source file. We now prefer the DASHSCOPE_API_KEY environment variable;
     * the original literal is kept only as a backward-compatible fallback.
     * The exposed key should be rotated and the fallback removed.
     */
    private static final String API_KEY = resolveApiKey();

    private static String resolveApiKey() {
        String fromEnv = System.getenv("DASHSCOPE_API_KEY");
        if (fromEnv != null && !fromEnv.trim().isEmpty()) {
            return fromEnv;
        }
        // Fallback: the key that was hard-coded before this change. Rotate it.
        return "sk-daa269338e874cfa868c7229bae11a7e";
    }

    /**
     * Builds a single-part text message for the DashScope multimodal API.
     *
     * @param role message role, e.g. {@code "system"} or {@code "user"}
     * @param text the text content
     */
    private static MultiModalMessage textMessage(String role, String text) {
        return MultiModalMessage.builder().role(role)
                .content(Arrays.asList(
                        Collections.singletonMap("text", text))).build();
    }

    /**
     * Performs one synchronous call to the DashScope multimodal API and extracts
     * the text of the first choice's first content part.
     *
     * @param model         model name to invoke
     * @param systemMessage system prompt message
     * @param userMessage   user message (text, or image+text)
     * @return the {@code "text"} field of the first content element of the first choice
     * @throws ApiException        on API-level failure
     * @throws NoApiKeyException   if no API key is configured
     * @throws UploadFileException if an attached file cannot be uploaded
     */
    private Object invoke(String model, MultiModalMessage systemMessage, MultiModalMessage userMessage)
            throws ApiException, NoApiKeyException, UploadFileException {
        MultiModalConversation conv = new MultiModalConversation();
        MultiModalConversationParam param = MultiModalConversationParam.builder()
                .apiKey(API_KEY)
                .model(model)
                .messages(Arrays.asList(systemMessage, userMessage))
                .build();
        MultiModalConversationResult result = conv.call(param);
        return result.getOutput().getChoices().get(0).getMessage().getContent().get(0).get("text");
    }

    /**
     * 快速文本分析 - 使用轻量级模型 (fast text analysis with the lightweight model).
     *
     * <p>Retries up to {@link #MAX_RETRIES} times on {@link ApiException}, with a
     * linearly increasing back-off. Other exceptions propagate immediately.
     *
     * @param text the text to analyse; a {@code null} value is tolerated in logging
     *             but will most likely be rejected by the remote API
     * @return the model's text answer (declared {@code Object} to match the SDK's
     *         heterogeneous content map values)
     * @throws ApiException        when all retries are exhausted
     * @throws NoApiKeyException   if no API key is configured
     * @throws UploadFileException declared for SDK compatibility
     */
    public Object fastTextAnalysis(String text) throws ApiException, NoApiKeyException, UploadFileException {
        // Null-safe preview: the original code NPE'd here on null input before
        // the API call could fail with a meaningful error.
        String preview = (text == null)
                ? "null"
                : text.substring(0, Math.min(50, text.length()));

        int retryCount = 0;
        while (retryCount < MAX_RETRIES) {
            try {
                System.out.println("快速文本分析开始 (尝试 " + (retryCount + 1) + "/" + MAX_RETRIES + "): " + preview + "...");

                MultiModalMessage systemMessage = textMessage("system",
                        "你是一个专业的环境污染分析专家，请提供简洁准确的分析结果。");
                MultiModalMessage userMessage = textMessage("user", text);

                long startTime = System.currentTimeMillis();
                Object answer = invoke(TEXT_MODEL, systemMessage, userMessage);
                long endTime = System.currentTimeMillis();

                System.out.println("快速文本分析完成，耗时: " + (endTime - startTime) + "ms");
                return answer;

            } catch (ApiException e) {
                retryCount++;
                System.err.println("快速文本分析失败 (尝试 " + retryCount + "/" + MAX_RETRIES + "): " + e.getMessage());

                if (retryCount >= MAX_RETRIES) {
                    System.err.println("达到最大重试次数，分析失败");
                    e.printStackTrace();
                    throw e;
                }
                // Linearly increasing back-off before the next attempt.
                try {
                    Thread.sleep(RETRY_BACKOFF_MS * retryCount);
                } catch (InterruptedException ie) {
                    // Restore the interrupt flag before bailing out.
                    Thread.currentThread().interrupt();
                    throw new RuntimeException("重试被中断", ie);
                }
            } catch (Exception e) {
                System.err.println("快速文本分析发生未知错误: " + e.getMessage());
                e.printStackTrace();
                throw e;
            }
        }

        throw new RuntimeException("快速文本分析失败，已达到最大重试次数");
    }

    /**
     * 多模态分析 - 用于图像+文本 (multimodal analysis for image+text).
     *
     * <p>If {@code url} is null or blank the request degrades to text-only; in
     * either case the {@code qwen-vl-plus} model is invoked exactly once (no retry).
     *
     * @param text the user's text prompt
     * @param url  optional image URL; ignored when null/blank
     * @return the model's text answer
     * @throws ApiException        on API-level failure
     * @throws NoApiKeyException   if no API key is configured
     * @throws UploadFileException if the image cannot be uploaded
     */
    public Object simpleMultiModalConversationCall(String text, String url)
            throws ApiException, NoApiKeyException, UploadFileException {
        try {
            System.out.println("AI服务调用开始 - 文本: " + text + ", URL: " + url);

            MultiModalMessage systemMessage = textMessage("system", "You are a helpful assistant.");

            // Build the user message as image+text when a usable URL is supplied,
            // otherwise as plain text.
            MultiModalMessage userMessage;
            if (url != null && !url.trim().isEmpty()) {
                System.out.println("构建图像+文本消息");
                userMessage = MultiModalMessage.builder().role("user")
                        .content(Arrays.asList(
                                Collections.singletonMap("image", url),
                                Collections.singletonMap("text", text))).build();
            } else {
                System.out.println("构建纯文本消息");
                userMessage = textMessage("user", text);
            }

            System.out.println("系统消息角色: " + systemMessage.getRole());
            System.out.println("用户消息角色: " + userMessage.getRole());

            long startTime = System.currentTimeMillis();
            System.out.println("开始调用AI API...");
            Object answer = invoke(MULTIMODAL_MODEL, systemMessage, userMessage);
            long endTime = System.currentTimeMillis();

            System.out.println("AI API调用成功，耗时: " + (endTime - startTime) + "ms");
            return answer;
        } catch (Exception e) {
            System.err.println("AI服务调用失败: " + e.getMessage());
            e.printStackTrace();
            throw e;
        }
    }
}
