package com.yc.cloud.openai.portal.service.impl;

import java.nio.file.Paths;
import java.time.LocalDateTime;
import java.util.EnumMap;
import java.util.Map;
import java.util.Optional;

import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;

import com.yc.cloud.common.basic.utils.MyFileUtils;
import com.yc.cloud.common.basic.utils.PathUtils;
import com.yc.cloud.common.oss.service.OssFileService;
import com.yc.cloud.openai.entity.AudioRecord;
import com.yc.cloud.openai.portal.common.OpenAiConstants;
import com.yc.cloud.openai.portal.dao.SpeechToTextClient;
import com.yc.cloud.openai.portal.dao.TextToSpeechClient;
import com.yc.cloud.openai.portal.dto.request.SpeechToTextRequest;
import com.yc.cloud.openai.portal.dto.request.TextToSpeechRequest;
import com.yc.cloud.openai.portal.dto.response.SpeechToTextResponse;
import com.yc.cloud.openai.portal.dto.response.TextToSpeechResponse;
import com.yc.cloud.openai.portal.enums.PromptType;
import com.yc.cloud.openai.portal.enums.SpeechToTextProvider;
import com.yc.cloud.openai.portal.enums.TextToSpeechProvider;
import com.yc.cloud.openai.portal.service.AudioRecordService;
import com.yc.cloud.openai.portal.service.AudioService;
import com.yc.cloud.openai.portal.utils.AudioUtils;

import cn.hutool.core.io.FileUtil;
import cn.hutool.core.lang.UUID;
import lombok.SneakyThrows;
import lombok.val;
import lombok.extern.slf4j.Slf4j;

@Service
@Slf4j
public class AudioServiceImpl implements AudioService {

    /** Fallback provider when the requested text-to-speech provider has no registered client. */
    private static final TextToSpeechProvider DEFAULT_TTS_PROVIDER = TextToSpeechProvider.CHAT_TTS;

    /** Fallback provider when the requested speech-to-text provider has no registered client. */
    private static final SpeechToTextProvider DEFAULT_STT_PROVIDER = SpeechToTextProvider.XUN_FEI;

    private final Map<TextToSpeechProvider, TextToSpeechClient> textToSpeechClients = new EnumMap<>(
            TextToSpeechProvider.class);
    private final Map<SpeechToTextProvider, SpeechToTextClient> speechToTextClients = new EnumMap<>(
            SpeechToTextProvider.class);
    private final PathUtils pathUtils;
    private final OssFileService ossFileService;
    private final AudioRecordService audioRecordService;

    /**
     * Builds the provider lookup tables from the Spring-injected bean maps.
     * <p>
     * Bean names are expected to match the enum constant names of
     * {@link TextToSpeechProvider} / {@link SpeechToTextProvider}; a mismatch fails
     * fast at startup with an {@link IllegalArgumentException} from {@code valueOf}.
     */
    public AudioServiceImpl(Map<String, TextToSpeechClient> textToSpeechClientMap,
            Map<String, SpeechToTextClient> speechToTextClientMap, PathUtils pathUtils, OssFileService ossFileService,
            AudioRecordService audioRecordService) {
        this.pathUtils = pathUtils;
        this.ossFileService = ossFileService;
        this.audioRecordService = audioRecordService;
        // Re-key the injected bean maps by enum so later lookups are type-safe EnumMap gets.
        textToSpeechClientMap
                .forEach((name, client) -> textToSpeechClients.put(TextToSpeechProvider.valueOf(name), client));
        speechToTextClientMap
                .forEach((name, client) -> speechToTextClients.put(SpeechToTextProvider.valueOf(name), client));
    }

    /**
     * Converts text to speech via the client matching {@code request.getProvider()},
     * falling back to the {@link #DEFAULT_TTS_PROVIDER} client when none is registered.
     * On a successful response (non-null audio data) an {@link AudioRecord} is persisted.
     *
     * @param request the text-to-speech request (text, provider key, client id, speaker)
     * @return the provider response; may be null if the client returned null
     */
    @Override
    public TextToSpeechResponse convertTextToSpeech(TextToSpeechRequest request) {
        val provider = TextToSpeechProvider.fromKey(request.getProvider());
        // orElseGet: only resolve the default client lazily, when no match was found.
        val client = Optional.ofNullable(textToSpeechClients.get(provider))
                .orElseGet(this::getDefaultTextToSpeechClient);

        val response = client.convertTextToSpeech(request);
        if (response != null && response.getAudioData() != null) {
            // Persist a generation record alongside the produced audio.
            generateAudioRecord(response, request);
        }
        return response;
    }

    /**
     * Converts an uploaded audio file to text via the client matching
     * {@code request.getProvider()}, falling back to the {@link #DEFAULT_STT_PROVIDER}
     * client when none is registered.
     *
     * @param file    the uploaded audio file
     * @param request the speech-to-text request; its audio data is populated here
     * @return the recognized text response from the selected client
     */
    @SneakyThrows
    @Override
    public SpeechToTextResponse convertSpeechToText(MultipartFile file, SpeechToTextRequest request) {
        val provider = SpeechToTextProvider.fromKey(request.getProvider());
        // orElseGet: only resolve the default client lazily, when no match was found.
        val client = Optional.ofNullable(speechToTextClients.get(provider))
                .orElseGet(this::getDefaultSpeechToTextClient);

        if (SpeechToTextProvider.XUN_FEI.getKey().equals(request.getProvider())) {
            // XunFei requires 16K-sample-rate WAV input, so convert first.
            val sampleRate16KFilePath = getSampleRate16KFilePath(file);
            request.setAudioData(FileUtil.readBytes(sampleRate16KFilePath));
            // The converted temp file is no longer needed once its bytes are in memory.
            FileUtil.del(sampleRate16KFilePath);
        } else {
            // Pass the raw file content straight through to the client.
            request.setAudioData(file.getBytes());
        }
        return client.convertSpeechToText(request);
    }

    /** Returns the default text-to-speech client ({@link #DEFAULT_TTS_PROVIDER}). */
    private TextToSpeechClient getDefaultTextToSpeechClient() {
        return textToSpeechClients.get(DEFAULT_TTS_PROVIDER);
    }

    /** Returns the default speech-to-text client ({@link #DEFAULT_STT_PROVIDER}). */
    private SpeechToTextClient getDefaultSpeechToTextClient() {
        return speechToTextClients.get(DEFAULT_STT_PROVIDER);
    }

    /**
     * Writes the uploaded audio to a temp file and converts it to a 16K-sample-rate
     * WAV file (PCM input is wrapped as WAV; WAV input is resampled). Other formats
     * are left untouched and the original temp file path is returned.
     *
     * @param file the uploaded audio file
     * @return path of the converted 16K file, or the original temp file if no
     *         conversion was produced
     */
    @SneakyThrows
    private String getSampleRate16KFilePath(MultipartFile file) {
        val format = FileUtil.getSuffix(file.getOriginalFilename());
        val data = file.getBytes();
        // UUID prefix: a millisecond timestamp collides when concurrent requests
        // land in the same millisecond and they would overwrite each other's files.
        val prefix = UUID.fastUUID().toString(true);
        val fileName = prefix + "." + format;
        var srcPath = Paths.get(pathUtils.getTempBasePath(), fileName).toString();
        FileUtil.writeBytes(data, srcPath);
        val fileName16K = fileName.replace(prefix, prefix + "_16k");
        var dstPath = Paths.get(pathUtils.getTempBasePath(), fileName16K).toString();
        val startTime = System.currentTimeMillis();
        if (OpenAiConstants.AUDIO_FORMAT_PCM.equals(format)) {
            // Raw PCM: wrap into a WAV container.
            dstPath = dstPath.replace(".pcm", ".wav");
            AudioUtils.pcm2wav(srcPath, dstPath);
        } else if (OpenAiConstants.AUDIO_FORMAT_WAV.equals(format)) {
            // Already WAV: only adjust the sample rate.
            AudioUtils.changeWavSampleRate(srcPath, dstPath, OpenAiConstants.AUDIO_SAMPLE_RATE_16K);
        }
        val endTime = System.currentTimeMillis();
        log.info("Audio format conversion took {}ms", endTime - startTime);
        if (FileUtil.exist(dstPath)) {
            // Conversion succeeded: the source temp file is no longer needed.
            FileUtil.del(srcPath);
            return dstPath;
        }
        return srcPath;
    }

    /**
     * Persists the generated audio to OSS and saves an {@link AudioRecord} for the
     * text-to-speech call. The record's file id is only set when the audio file was
     * written and registered with OSS successfully.
     */
    private void generateAudioRecord(TextToSpeechResponse response, TextToSpeechRequest request) {
        val audioRecord = new AudioRecord();
        audioRecord.setUuid(UUID.fastUUID().toString());
        val audioData = response.getAudioData();

        val fileId = ossFileService.generateFileId();
        val dstAudioPath = Paths.get(pathUtils.getBasePath(), request.getClientId(),
                fileId + "." + OpenAiConstants.AUDIO_FORMAT_WAV).toString();
        MyFileUtils.decodeAndSaveFile(audioData, dstAudioPath);
        // Only attach the file id when the decoded file is non-empty and OSS metadata saved.
        if (FileUtil.exist(dstAudioPath) && FileUtil.size(FileUtil.file(dstAudioPath)) > 0) {
            val ossFile = ossFileService.buildOssFile(fileId, fileId, dstAudioPath);
            val saveFlag = ossFileService.save(ossFile);
            if (saveFlag) {
                audioRecord.setFileId(fileId);
            }
        }
        audioRecord.setClientId(request.getClientId());
        audioRecord.setPrompt(request.getText());
        audioRecord.setType(PromptType.TEXT2SPEECH.getKey());
        // Null-guard: a missing speaker must not abort saving the whole record with an NPE.
        if (request.getSpeaker() != null) {
            audioRecord.setSpeaker(request.getSpeaker().getKey());
        }
        audioRecord.setProvider(request.getProvider());
        audioRecord.setStatus(OpenAiConstants.AUDIO_RECORD_STATUS_COMPLETED);
        audioRecord.setCreateTime(LocalDateTime.now());
        audioRecordService.save(audioRecord);
    }
}
