package com.cfp4cloud.cfp.knowledge.service.impl;

import cn.hutool.core.codec.Base64;
import com.baomidou.mybatisplus.core.toolkit.Wrappers;
import com.cfp4cloud.cfp.admin.api.utils.ByteArrayMultipartFile;
import com.cfp4cloud.cfp.knowledge.dto.AiGenerateDTO;
import com.cfp4cloud.cfp.knowledge.dto.AiVoiceCompletionsDTO;
import com.cfp4cloud.cfp.knowledge.entity.AiModelEntity;
import com.cfp4cloud.cfp.knowledge.mapper.AiModelMapper;
import com.cfp4cloud.cfp.knowledge.service.AiDashscopeAssistantService;
import com.cfp4cloud.cfp.knowledge.service.AiGenerateService;
import com.cfp4cloud.cfp.knowledge.service.AiSiliconflowAssistantService;
import com.cfp4cloud.cfp.knowledge.support.constant.ModelSupportEnums;
import com.cfp4cloud.cfp.knowledge.support.provider.ModelProvider;
import lombok.RequiredArgsConstructor;
import lombok.SneakyThrows;
import lombok.extern.slf4j.Slf4j;
import org.springframework.ai.chat.prompt.PromptTemplate;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.core.io.Resource;
import org.springframework.stereotype.Service;

import java.util.Objects;

/**
 * AI content generation service implementation.
 * <p>
 * Provides bidirectional conversion between text and audio: speech recognition
 * (audio-to-text) via SiliconFlow SenseVoice with a Dashscope fallback, speech
 * synthesis (text-to-audio) via GPT-SoVITS, and structured text generation
 * driven by a prompt template.
 *
 * @author chenda
 * @date 2024/6/17
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class AiGenerateServiceImpl implements AiGenerateService {

	/**
	 * Dashscope (Alibaba Cloud) assistant service; fallback path for
	 * audio-to-text recognition.
	 */
	private final AiDashscopeAssistantService aiDashscopeAssistantService;

	/**
	 * Data access layer for AI model configuration records.
	 */
	private final AiModelMapper aiModelMapper;

	/**
	 * Model provider managing the available model/assistant instances.
	 */
	private final ModelProvider modelProvider;

	/**
	 * Prompt template resource used to build structured text-generation prompts.
	 */
	@Value("classpath:/prompts/gen-text.st")
	private Resource generateTextResource;

	/**
	 * Generates structured text from a prompt and conditions.
	 * <p>
	 * Renders the classpath template with the caller-supplied values and sends
	 * the result to the assistant selected by {@code generateDTO.getModelName()}.
	 * @param generateDTO input carrying the prompt, conditions and target model name
	 * @return the generated text content
	 */
	@Override
	public String generateText(AiGenerateDTO generateDTO) {
		// Fixed misspelled local name "promptTemplat" -> "promptTemplate".
		PromptTemplate promptTemplate = new PromptTemplate(generateTextResource);
		promptTemplate.add("prompt", generateDTO.getPrompt());
		promptTemplate.add("conditions", generateDTO.getConditions());

		return modelProvider.getAiAssistant(generateDTO.getModelName()).getValue().chat(promptTemplate.render());
	}

	/**
	 * Function selection (not implemented yet).
	 * @param conversationId conversation identifier
	 * @param description function description
	 * @return always {@code null} until implemented
	 */
	@Override
	public String selectFunction(String conversationId, String description) {
		return null;
	}

	/**
	 * Converts audio data to text.
	 * <p>
	 * Looks up the SenseVoice model record and recognizes the audio through the
	 * SiliconFlow assistant; falls back to the Dashscope assistant otherwise.
	 * @param fileData raw audio file bytes (wrapped as "upload.wav" for upload)
	 * @return the recognized text, or an error message when no model is configured
	 */
	@Override
	public String generateTextByAudio(byte[] fileData) {
		AiModelEntity aiModelEntity = findModel(ModelSupportEnums.SILICONFLOW_SENSE_VOICE_SMALL);

		if (Objects.isNull(aiModelEntity)) {
			return "未找到对应的音频模型: " + ModelSupportEnums.SILICONFLOW_SENSE_VOICE_SMALL.getCode();
		}

		if (ModelSupportEnums.SILICONFLOW_SENSE_VOICE_SMALL.getCode().equals(aiModelEntity.getModelName())) {
			AiSiliconflowAssistantService fileAssistant = modelProvider
				.getSiliconflowAssistant(aiModelEntity.getName());
			ByteArrayMultipartFile byteArrayMultipartFile = new ByteArrayMultipartFile("upload.wav", "upload.wav", null,
					fileData);
			return fileAssistant.audioToText(byteArrayMultipartFile, aiModelEntity.getModelName());
		}

		// NOTE(review): the lookup above already filters modelName == SenseVoice code,
		// so this Dashscope fallback is currently unreachable; kept intact in case the
		// lookup criteria are widened later — confirm intended behavior.
		return aiDashscopeAssistantService.audioToText(fileData, aiModelEntity);
	}

	/**
	 * Converts text to audio using the GPT-SoVITS model.
	 * <p>
	 * Builds a voice-completion request (voice taken from the enum description)
	 * and returns the synthesized audio Base64-encoded.
	 * @param text the text content to synthesize
	 * @return Base64-encoded audio data, or an error message when no model is configured
	 */
	@Override
	@SneakyThrows
	public String generateAudioByText(String text) {
		AiModelEntity aiModelEntity = findModel(ModelSupportEnums.SILICONFLOW_GPT_SOVITS);

		if (Objects.isNull(aiModelEntity)) {
			return "未找到对应的音频模型: " + ModelSupportEnums.SILICONFLOW_GPT_SOVITS.getCode();
		}

		AiSiliconflowAssistantService fileAssistant = modelProvider.getSiliconflowAssistant(aiModelEntity.getName());

		AiVoiceCompletionsDTO audioModelDTO = new AiVoiceCompletionsDTO();
		audioModelDTO.setInput(text);
		audioModelDTO.setModel(ModelSupportEnums.SILICONFLOW_GPT_SOVITS.getCode());
		audioModelDTO.setVoice(ModelSupportEnums.SILICONFLOW_GPT_SOVITS.getDesc());
		Resource resource = fileAssistant.textToAudio(audioModelDTO);
		return Base64.encode(resource.getContentAsByteArray());
	}

	/**
	 * Looks up the configured model record matching the given supported model's
	 * provider and model code. Extracted to remove the query duplicated between
	 * the audio methods; {@code false} suppresses the multi-row exception and
	 * returns the first match (or {@code null} when none exists).
	 * @param model the supported-model descriptor to match on
	 * @return the matching model entity, or {@code null} when not configured
	 */
	private AiModelEntity findModel(ModelSupportEnums model) {
		return aiModelMapper.selectOne(Wrappers.<AiModelEntity>lambdaQuery()
			.eq(AiModelEntity::getProvider, model.getProvider())
			.eq(AiModelEntity::getModelName, model.getCode()), false);
	}

}
