package com.yang.service.impl;

import com.alibaba.dashscope.audio.tts.SpeechSynthesisResult;
import com.alibaba.dashscope.audio.ttsv2.SpeechSynthesisParam;
import com.alibaba.dashscope.audio.ttsv2.SpeechSynthesizer;
import com.alibaba.dashscope.common.ResultCallback;
import com.google.gson.Gson;
import com.yang.dto.DigitalHumanTaskDTO;
import com.yang.dto.DigitalHumanVideoDTO;
import com.yang.entity.AudioRecord;
import com.yang.entity.DigitalHuman;
import com.yang.entity.VideoTask;
import com.yang.exception.ClientException;
import com.yang.repository.AudioRecordRepository;
import com.yang.repository.DigitalHumanRepository;
import com.yang.repository.ImageRecordRepository;
import com.yang.repository.VideoTaskRepository;
import com.yang.service.GenerateAudioService;
import com.yang.service.GenerateVideoService;
import com.yang.thirdparty.aliyun.properties.AliyunProperties;
import com.yang.type.DigitalHumanStatusEnum;
import com.yang.type.GenerateVideoActionEnum;
import com.yang.util.AliOssUtil;
import com.yang.util.AudioUtil;
import com.yang.util.TimeUtils;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Transactional;

import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;

/**
 * 音频生成服务实现类
 * 作者： yang
 * 时间： 2025/07/08
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class GenerateAudioServiceImpl implements GenerateAudioService {

    private final AliyunProperties aliModelConfig;
    private final AudioRecordRepository audioRecordRepository;
    private final DigitalHumanRepository digitalHumanRepository;
    private final VideoTaskRepository videoTaskRepository;

    /**
     * 生成音频文件，并可选更新音频记录
     *
     * @param text        文本内容
     * @param audioRecord 可选的音频记录对象，如果提供则会在完成时更新状态
     * @return 生成的音频文件路径
     * @throws IOException 如果音频文件创建失败
     */
    public String generateAudioFile(String text, AudioRecord audioRecord) throws IOException {
        // 创建音频文件处理器
        AudioUtil.AudioFileHandler audioHandler = AudioUtil.createAudioFile();

        // 使用异步请求
        CountDownLatch latch = new CountDownLatch(1);
        // 实现回调接口ResultCallback
        ResultCallback<SpeechSynthesisResult> callback = new ResultCallback<>() {
            @Override
            public void onEvent(SpeechSynthesisResult result) {
                if (result.getAudioFrame() != null) {
                    try {
                        // 将音频帧数据转换为 ByteBuffer 类型
                        ByteBuffer byteBuffer = result.getAudioFrame();
                        if (byteBuffer != null) {
                            audioHandler.writeData(byteBuffer);
                            log.debug("{} 收到音频，已保存", TimeUtils.getTimestamp());
                        }
                    } catch (IOException e) {
                        log.error("写入音频数据失败: {}", e.getMessage());
                        onError(e);
                    }
                }
            }

            @Override
            public void onComplete() {
                log.info("{} 收到Complete，语音合成结束", TimeUtils.getTimestamp());
                // 更新音频记录状态
                if (audioRecord != null) {
                    try {
                        // 获取生成的文件路径
                        String audioFilePath = audioHandler.getFilePath();
                        Path path = Paths.get(audioFilePath);
                        String fileName = path.getFileName().toString();

                        // 更新记录状态
                        audioRecord.setStatus(DigitalHumanStatusEnum.DONE);
                        audioRecord.setFilePath(audioFilePath);
                        audioRecord.setFileName(fileName);
                        audioRecord.setAccessUrl(audioFilePath);
                        log.info("音频记录已更新，ID: {}", audioRecord.getId());
                    } catch (Exception e) {
                        log.error("更新音频记录失败: {}", e.getMessage());
                    }
                }
                latch.countDown();
            }

            @Override
            public void onError(Exception e) {
                log.info("出现异常：{}", e.toString());
                // 如果有音频记录，更新其状态为失败
                if (audioRecord != null) {
                    audioRecord.setStatus(DigitalHumanStatusEnum.FAILED);
                    audioRecord.setFailedCount(audioRecord.getFailedCount() + 1);
                    audioRecordRepository.save(audioRecord);
                    log.error("音频生成失败，已更新记录状态，ID: {}", audioRecord.getId());
                }
                latch.countDown();
            }
        };

        // 获取模型的音色参数
        String speaker = aliModelConfig.getCosyVoice();

        // 请求参数
        SpeechSynthesisParam param =
                SpeechSynthesisParam.builder()
                        // 若没有将API Key配置到环境变量中，需将下面这行代码注释放开，并将your-api-key替换为自己的API Key
                        .apiKey(aliModelConfig.getApiKey())
                        .model(aliModelConfig.getCosyModel()) // 模型
                        .voice(speaker) // 音色
                        .build();
        // 第二个参数"callback"传入回调即启用异步模式
        SpeechSynthesizer synthesizer = new SpeechSynthesizer(param, callback);
        // 非阻塞调用，立即返回null（实际结果通过回调接口异步传递），在回调接口的onEvent方法中实时获取二进制音频
        synthesizer.call(text);
        // 等待合成完成
        try {
            latch.await();
            // 等待播放线程全部播放完
        } catch (InterruptedException e) {
            throw new RuntimeException(e);
        }
        System.out.println(
                "[Metric] requestId为："
                        + synthesizer.getLastRequestId()
                        + "，首包延迟（毫秒）为："
                        + synthesizer.getFirstPackageDelay());

        // 关闭音频文件
        audioHandler.close();

        // 获取生成的音频文件路径
        return audioHandler.getFilePath();
    }

    @Override
    public void executeAudioGenerationTasks() {
        log.info("开始处理待生成音频任务");

        try {
            // 使用JPA方式查询待处理的音频生成任务
            List<AudioRecord> pendingTasks = audioRecordRepository.findAll(
                    (root, query, criteriaBuilder) ->
                            criteriaBuilder.equal(root.get("status"), DigitalHumanStatusEnum.IN_PROGRESS));
            log.info("查询到{}个待处理音频任务", pendingTasks.size());

            // 处理每个任务
            for (AudioRecord task : pendingTasks) {
                try {
                    processAudioTask(task);
                } catch (Exception e) {
                    log.error("处理音频任务失败，ID: {}", task.getId(), e);
                }
            }

        } catch (Exception e) {
            log.error("执行音频生成定时任务过程中发生错误", e);
        }

        log.info("音频生成任务处理完成");
    }

    /**
     * 处理单个音频任务
     *
     * @param task 音频任务
     */
    @Transactional(rollbackFor = Exception.class)
    private void processAudioTask(AudioRecord task) {
        try {
            // 生成音频并自动更新记录状态
            String audioFilePath = generateAudioFile(task.getSpeakText(), task);

            // 上传音频到OSS
            String audioUrl;
            try (FileInputStream audioInputStream = new FileInputStream(audioFilePath)) {
                String audioName = UUID.randomUUID() + ".wav";
                audioUrl = AliOssUtil.uploadFile("audio/" + audioName, audioInputStream);
            }
            // 更新数据
            task.setAccessUrl(audioUrl);
            task.setFilePath(audioFilePath);
            audioRecordRepository.save(task);

            // 创建视频任务记录
            createVideoTask(task);
            log.info("音频任务处理成功，ID: {}", task.getId());
        } catch (Exception e) {
            // 更新任务状态为失败
            task.setStatus(DigitalHumanStatusEnum.FAILED);
            task.setFailedCount(task.getFailedCount() + 1);
            audioRecordRepository.save(task);

            log.error("处理音频任务失败，ID: {}", task.getId(), e);
        }
    }

    /**
     * 创建视频生成任务
     *
     * @param audioRecord   音频记录对象
     */
    private void createVideoTask(AudioRecord audioRecord) {
        try {
            // 获取数字人信息
            DigitalHuman digitalHuman = digitalHumanRepository.findById(audioRecord.getDigitalHumanId())
                    .orElseThrow(() -> new ClientException("未找到对应的数字人信息"));

            // 创建视频任务记录
            VideoTask videoTask = new VideoTask();
            videoTask.setDigitalHumanId(audioRecord.getDigitalHumanId());
            videoTask.setAction(GenerateVideoActionEnum.CREATE_VIDEO);
            videoTask.setStatus(DigitalHumanStatusEnum.IN_PROGRESS); // 设置为待处理状态

            // 构建input_params参数
            Map<String, Object> inputParams = new HashMap<>();

            // 构建主体参数
            inputParams.put("model", aliModelConfig.getLiveModel());

            // 构建input子对象
            Map<String, String> inputObj = new HashMap<>();
            inputObj.put("image_url", digitalHuman.getInputImageUrl());
            inputObj.put("audio_url", audioRecord.getAccessUrl());
            inputParams.put("input", inputObj);

            // 设置输入参数
            videoTask.setInputParams(new Gson().toJson(inputParams));

            videoTaskRepository.save(videoTask);

            log.info("创建视频任务成功，数字人ID: {}，音频记录ID: {}, 视频任务ID: {}，参数: {}",
                    audioRecord.getDigitalHumanId(), audioRecord.getId(), videoTask.getId(), videoTask.getInputParams());
        } catch (Exception e) {
            log.error("创建视频任务失败，数字人ID: {}，音频记录ID: {}",
                    audioRecord.getDigitalHumanId(), audioRecord.getId(), e);
            throw e;
        }
    }
}

