/*
 * Copyright 2024 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.djf.springaidemo.controller;

import com.alibaba.cloud.ai.dashscope.chat.DashScopeChatModel;
import com.alibaba.cloud.ai.dashscope.chat.DashScopeChatOptions;
import com.alibaba.cloud.ai.dashscope.chat.MessageFormat;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversation;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationParam;
import com.alibaba.dashscope.aigc.multimodalconversation.MultiModalConversationResult;
import com.alibaba.dashscope.common.MultiModalMessage;
import com.alibaba.dashscope.common.Role;
import com.djf.springaidemo.data.ModelEnum;
import com.djf.springaidemo.helper.FrameExtraHelper;
import jakarta.annotation.Resource;
import org.springframework.ai.chat.client.ChatClient;
import org.springframework.ai.chat.messages.UserMessage;
import org.springframework.ai.chat.model.ChatModel;
import org.springframework.ai.chat.model.ChatResponse;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.ai.model.Media;
import org.springframework.core.io.ResourceLoader;
import org.springframework.util.MimeTypeUtils;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.bind.annotation.RestController;
import java.net.URI;
import java.util.*;
import java.util.logging.Level;
import java.util.logging.Logger;

/**
 * Multimodal controller exposing AI endpoints for image understanding,
 * video-frame reasoning, streaming image analysis and in-image text
 * extraction (OCR-style).
 */
@RestController
@RequestMapping("/multi")
public class MultiModelController {

	private static final Logger LOG = Logger.getLogger(MultiModelController.class.getName());

	/**
	 * ChatClient used to converse with the multimodal large model.
	 */
	private final ChatClient dashScopeChatClient;

	/**
	 * ResourceLoader used to load resource files (e.g. images) from the classpath.
	 */
	@Resource
	private ResourceLoader resourceLoader;

	/**
	 * Default prompt for image-understanding tasks ("What are these?").
	 */
	private static final String DEFAULT_PROMPT = "这些是什么？";

	/**
	 * Default prompt for video-frame analysis ("These are frames extracted from
	 * a video; please describe the video's content.").
	 */
	private static final String DEFAULT_VIDEO_PROMPT = "这是一组从视频中提取的图片帧，请描述此视频中的内容。";

	/**
	 * Default multimodal model name.
	 */
	private static final String DEFAULT_MODEL = "qwen-vl-max-latest";

	/**
	 * Number of frames requested from the video for the /video endpoint.
	 */
	private static final int VIDEO_FRAME_COUNT = 10;

	/**
	 * Creates the controller and builds the ChatClient from the injected model.
	 *
	 * @param chatModel the Spring AI {@link ChatModel} implementation
	 */
	public MultiModelController(ChatModel chatModel) {
		this.dashScopeChatClient = ChatClient.builder(chatModel).build();
	}

	/**
	 * Tags the message with the given DashScope message format and wraps it
	 * in a Prompt with multimodal options for the given model.
	 */
	private Prompt buildPrompt(UserMessage message, MessageFormat format, String model) {
		message.getMetadata().put(DashScopeChatModel.MESSAGE_FORMAT, format);
		return new Prompt(
				message,
				DashScopeChatOptions.builder()
						.withModel(model)
						.withMultiModel(true)
						.build()
		);
	}

	/**
	 * Performs a blocking (non-streaming) model call and unwraps the text output.
	 */
	private String callModel(Prompt prompt) {
		ChatResponse response = dashScopeChatClient.prompt(prompt).call().chatResponse();
		return response.getResult().getOutput().getText();
	}

	/**
	 * Image understanding via a remote image URL.
	 *
	 * @param prompt the user prompt; defaults to "这些是什么？" ("What are these?")
	 * @return the model's analysis of the image
	 * @throws Exception if the URL is malformed or the model call fails
	 */
	@GetMapping("/image")
	public String image(@RequestParam(value = "prompt", required = false, defaultValue = DEFAULT_PROMPT) String prompt) throws Exception {
		List<Media> mediaList = List.of(
				new Media(
						MimeTypeUtils.IMAGE_PNG,
						new URI("https://img-s.msn.cn/tenant/amp/entityid/AA1E879I.img?w=534&h=950&m=6").toURL()
				)
		);

		UserMessage message = new UserMessage(prompt, mediaList);
		return callModel(buildPrompt(message, MessageFormat.IMAGE, DEFAULT_MODEL));
	}

	/**
	 * Video reasoning: analyses a list of frames extracted from a video via
	 * {@link FrameExtraHelper}.
	 *
	 * @param prompt the user prompt; defaults to a "describe this video" prompt
	 * @return the model's description of the video content
	 */
	@GetMapping("/video")
	public String video(@RequestParam(value = "prompt", required = false, defaultValue = DEFAULT_VIDEO_PROMPT) String prompt) {
		List<Media> mediaList = FrameExtraHelper.createMediaList(VIDEO_FRAME_COUNT);

		UserMessage message = new UserMessage(prompt, mediaList);
		// Use the shared DEFAULT_MODEL constant instead of repeating the literal.
		return callModel(buildPrompt(message, MessageFormat.VIDEO, DEFAULT_MODEL));
	}

	/**
	 * Image understanding via a local classpath resource.
	 *
	 * @param prompt the user prompt; defaults to "这些是什么？" ("What are these?")
	 * @return the model's analysis of the image
	 * @throws Exception if the model call fails
	 */
	@GetMapping("/image/bin")
	public String imagesBinary(@RequestParam(value = "prompt", required = false, defaultValue = DEFAULT_PROMPT) String prompt) throws Exception {
		UserMessage message = new UserMessage(
				prompt,
				new Media(
						MimeTypeUtils.IMAGE_JPEG,
						resourceLoader.getResource("classpath:/multimodel/dog_and_girl.jpeg")
				));
		return callModel(buildPrompt(message, MessageFormat.IMAGE, ModelEnum.VISUAL_COMPREHENSION.getModel()));
	}

	/**
	 * Streaming image understanding: collects the streamed chunks and returns
	 * the concatenated result.
	 *
	 * @param prompt the user prompt; defaults to "这些是什么？" ("What are these?")
	 * @return the full concatenated analysis text
	 */
	@GetMapping("/stream/image")
	public String streamImage(@RequestParam(value = "prompt", required = false, defaultValue = DEFAULT_PROMPT) String prompt) {
		UserMessage message = new UserMessage(
				prompt,
				new Media(
						MimeTypeUtils.IMAGE_JPEG,
						resourceLoader.getResource("classpath:/multimodel/dog_and_girl.jpeg")
				));

		List<ChatResponse> responses = dashScopeChatClient
				.prompt(buildPrompt(message, MessageFormat.IMAGE, ModelEnum.VISUAL_COMPREHENSION.getModel()))
				.stream().chatResponse().collectList().block();

		StringBuilder result = new StringBuilder();
		if (responses != null) {
			for (ChatResponse chunk : responses) {
				result.append(chunk.getResult().getOutput().getText());
			}
		}
		return result.toString();
	}

	/**
	 * Text extraction: recognises the text content inside an image.
	 * Supports a remote image URL (a local classpath example is commented out).
	 *
	 * @param prompt the user prompt (currently not applied; the model is given
	 *               the fixed instruction "Read all the text in the image.")
	 * @return the text recognised in the image, or {@code null} if the call fails
	 */
	@GetMapping("/text")
	public Object text(@RequestParam(value = "prompt", required = false, defaultValue = DEFAULT_PROMPT) String prompt) {
		MultiModalConversation conv = new MultiModalConversation();

		Map<String, Object> imagePart = new HashMap<>();
		imagePart.put("image", "https://img-s.msn.cn/tenant/amp/entityid/AA1E879I.img?w=534&h=950&m=6");
		//imagePart.put("image", "classpath:/multimodel/20250428215856.png"); // example local image path (optional)
		imagePart.put("max_pixels", "1003520");
		imagePart.put("min_pixels", "3136");

		MultiModalMessage userMessage = MultiModalMessage.builder()
				.role(Role.USER.getValue())
				.content(Arrays.asList(
						imagePart,
						Collections.singletonMap("text", "Read all the text in the image.")
				))
				.build();

		MultiModalConversationParam param = MultiModalConversationParam.builder()
				// Security: read the API key from the environment — never hard-code secrets in source.
				.apiKey(System.getenv("DASHSCOPE_API_KEY"))
				.model(ModelEnum.EXTRACT_TEXT.getModel())
				.message(userMessage)
				.build();

		try {
			MultiModalConversationResult result = conv.call(param);
			// Traverse the accessor chain once and return the extracted text.
			return result.getOutput().getChoices().get(0).getMessage().getContent().get(0).get("text");
		} catch (Exception e) {
			// Preserve the original null-on-failure contract, but log instead of printStackTrace().
			LOG.log(Level.WARNING, "Text extraction call failed", e);
			return null;
		}
	}
}
