package com.lfy.kcat.content.controller.ai.controller;

import com.alibaba.cloud.ai.dashscope.image.DashScopeImageModel;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.*;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.alibaba.dashscope.exception.UploadFileException;
import com.alibaba.dashscope.utils.JsonUtils;
import org.springframework.ai.image.ImagePrompt;
import org.springframework.ai.image.ImageResponse;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.bind.annotation.RestController;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

@RestController
@RequestMapping("/qwen")
public class QwenImageController {

    /** Spring AI wrapper around the DashScope image-generation model. */
    @Autowired
    DashScopeImageModel dashScopeImageModel;

    /**
     * Generates an image from the given text prompt via the low-level DashScope
     * {@code MultiModalConversation} API using the {@code qwen-image} model, and
     * returns a minimal HTML snippet embedding the generated image.
     *
     * @param message the text prompt to generate an image from
     * @return an {@code <img>} tag whose {@code src} points at the generated image URL
     * @throws NoApiKeyException   if no DashScope API key is configured
     *                             (set the {@code DASHSCOPE_API_KEY} environment variable)
     * @throws UploadFileException if the SDK fails to upload request content
     */
    @GetMapping("/baseImage")
    public  String BaseImage(@RequestParam("msg") String message) throws NoApiKeyException, UploadFileException {
        MultiModalConversation conv = new MultiModalConversation();

        MultiModalMessage userMessage = MultiModalMessage.builder()
            .role(Role.USER.getValue())
            .content(Collections.singletonList(
                Collections.singletonMap("text", message)
            )).build();

        // Model-specific generation options passed through verbatim to DashScope.
        Map<String, Object> parameters = new HashMap<>();
        parameters.put("watermark", true);
        parameters.put("prompt_extend", true);
        parameters.put("negative_prompt", "");
        parameters.put("size", "1140*1472");

        // SECURITY: an API key was previously hard-coded here. It has been
        // removed — the SDK falls back to the DASHSCOPE_API_KEY environment
        // variable when .apiKey() is not supplied. Rotate the leaked key.
        MultiModalConversationParam param = MultiModalConversationParam.builder()
            .model("qwen-image")
            .messages(Collections.singletonList(userMessage))
            .parameters(parameters)
            .build();

        MultiModalConversationResult result = conv.call(param);
        // Extract the image URL from the first choice's first content entry.
        Map<String, Object> first =
            result.getOutput().getChoices().get(0).getMessage().getContent().get(0);
        String image = first.get("image").toString();
        // Quote the attribute value so the returned HTML stays well-formed for
        // URLs containing query strings or other attribute-breaking characters.
        return "<img src=\"" + image + "\">";
    }

    /**
     * Generates an image from the given text prompt via the Spring AI
     * {@link DashScopeImageModel} abstraction and returns the image URL.
     *
     * @param message the text prompt to generate an image from
     * @return the URL of the generated image
     */
    @RequestMapping("/image")
    public String image(@RequestParam("msg") String message){
        ImagePrompt imagePrompt = new ImagePrompt(message);
        ImageResponse call = dashScopeImageModel.call(imagePrompt);
        return call.getResult().getOutput().getUrl();
    }
}
