package com.llm.travel_agent_assistant.service.impl;

import com.alibaba.dashscope.audio.asr.recognition.Recognition;
import com.alibaba.dashscope.audio.asr.recognition.RecognitionParam;
import com.alibaba.dashscope.exception.NoApiKeyException;
import com.llm.travel_agent_assistant.dto.BudgetAnalyzeDTO;
import com.llm.travel_agent_assistant.dto.GeneratePlanDTO;
import com.llm.travel_agent_assistant.dto.Result;
import com.llm.travel_agent_assistant.service.AsrService;
import com.llm.travel_agent_assistant.service.OpenAiService;
import io.reactivex.BackpressureStrategy;
import io.reactivex.Flowable;
import jakarta.annotation.Resource;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.file.Files;

/**
 * 语音识别服务实现类
 */
@Slf4j
@Service
/**
 * Speech-recognition (ASR) service implementation backed by the Alibaba DashScope
 * streaming recognition SDK.
 *
 * <p>All entry points funnel audio through {@link #recognizeSpeech(byte[], String)},
 * which streams PCM chunks to DashScope, collects the finished-sentence text, and
 * returns it. The recognized text is then forwarded to {@link OpenAiService} — either
 * for travel-plan generation or budget analysis, depending on the caller.
 */
@Slf4j
@Service
public class AsrServiceImpl implements AsrService {

    /** DashScope real-time ASR model identifier. */
    private static final String ASR_MODEL = "fun-asr-realtime";
    /** Raw audio container format expected by the recognizer. */
    private static final String AUDIO_FORMAT = "pcm";
    /** Sample rate (Hz) of the PCM input; also used when generating mock audio. */
    private static final int SAMPLE_RATE = 16000;
    /** Number of bytes emitted per streamed audio chunk. */
    private static final int CHUNK_SIZE = 1024;
    /** Artificial delay between chunks to simulate a live audio feed. */
    private static final long CHUNK_DELAY_MS = 20;

    @Value("${spring.ai.dashscope.api-key}")
    private String apiKey;
    @Resource
    private OpenAiService openAiService;

    @Override
    public Result realTimeSpeechRecognition(byte[] audioData) {
        // Real-time recognition is routed through the streaming pipeline.
        return streamRealTimeRecognition(audioData, "");
    }

    @Override
    public Result syncSpeechRecognition(File audioFile) {
        try {
            // Load the whole audio file into memory, then stream it to the recognizer.
            byte[] audioData = readAudioFile(audioFile);
            return streamRealTimeRecognition(audioData, "");
        } catch (IOException e) {
            log.error("读取音频文件失败", e);
            throw new RuntimeException("读取音频文件失败", e);
        }
    }

    @Override
    public Result realTimeRecognitionFromMicrophone() {
        // Microphone capture is not integrated yet; silent PCM stands in for real input.
        return streamRealTimeRecognition(generateMockAudioData(), "");
    }

    @Override
    public Result streamRealTimeRecognition(byte[] audioData, String userId) {
        String userMessage = recognizeSpeech(audioData, userId);
        return openAiService.generateTravelPlan(new GeneratePlanDTO(userId, userMessage));
    }

    @Override
    public Result streamRealTimeRecognitionForBudgetAnalyze(byte[] audioData, String userId) {
        String userMessage = recognizeSpeech(audioData, userId);
        return openAiService.budgetAnalyze(new BudgetAnalyzeDTO(userId, userMessage));
    }

    /**
     * Streams the given PCM audio to DashScope and returns the concatenated text of
     * all completed sentences. Intermediate (non-final) results are only logged at
     * DEBUG level. The recognizer is always stopped in {@code finally}; stop failures
     * are logged as warnings and do not mask the recognition outcome.
     *
     * @param audioData raw PCM audio (assumed 16 kHz, 16-bit — TODO confirm with callers)
     * @param userId    caller identifier used for log correlation
     * @return the full recognized text (may be empty if no sentence completed)
     * @throws RuntimeException wrapping {@link NoApiKeyException} or any recognition failure
     */
    private String recognizeSpeech(byte[] audioData, String userId) {
        Recognition recognizer = null;
        try {
            log.info("用户 {} 开始语音识别，音频数据大小: {} 字节", userId, audioData.length);

            // Build the chunked audio stream fed to the recognizer.
            Flowable<ByteBuffer> audioSource = createAudioSource(audioData);

            recognizer = new Recognition();
            RecognitionParam param = RecognitionParam.builder()
                    .model(ASR_MODEL)
                    .format(AUDIO_FORMAT)
                    .apiKey(apiKey)
                    .sampleRate(SAMPLE_RATE)
                    .build();

            StringBuilder resultBuilder = new StringBuilder();

            // Blocking streaming call: accumulate only finished sentences.
            recognizer.streamCall(param, audioSource)
                    .blockingForEach(result -> {
                        if (result.isSentenceEnd()) {
                            String finalResult = result.getSentence().getText();
                            log.info("用户 {} 语音识别结果: {}", userId, finalResult);
                            resultBuilder.append(finalResult);
                        } else {
                            String intermediateResult = result.getSentence().getText();
                            log.debug("用户 {} 中间识别结果: {}", userId, intermediateResult);
                        }
                    });

            String userMessage = resultBuilder.toString();
            log.info("用户 {} 语音识别完成，最终文本: {}", userId, userMessage);
            return userMessage;

        } catch (NoApiKeyException e) {
            log.error("API Key配置错误", e);
            throw new RuntimeException("API Key配置错误", e);
        } catch (Exception e) {
            log.error("语音识别失败", e);
            throw new RuntimeException("语音识别失败", e);
        } finally {
            // Stop the recognizer defensively; an already-stopped recognizer may throw.
            if (recognizer != null) {
                try {
                    recognizer.stop();
                } catch (Exception e) {
                    log.warn("关闭识别器时发生警告，但继续执行: {}", e.getMessage());
                }
            }
        }
    }

    /**
     * Wraps the audio bytes in a cold {@link Flowable} that emits fixed-size
     * {@link ByteBuffer} chunks with a short delay between them, simulating a
     * live microphone feed. Buffers wrap the source array (no copy).
     */
    private Flowable<ByteBuffer> createAudioSource(byte[] audioData) {
        return Flowable.create(emitter -> {
            try {
                int offset = 0;
                while (offset < audioData.length) {
                    int currentChunkSize = Math.min(CHUNK_SIZE, audioData.length - offset);
                    emitter.onNext(ByteBuffer.wrap(audioData, offset, currentChunkSize));
                    offset += currentChunkSize;

                    // Pace the emission to mimic real-time streaming; skip the
                    // trailing sleep once the last chunk has been emitted.
                    if (offset < audioData.length) {
                        Thread.sleep(CHUNK_DELAY_MS);
                    }
                }
                emitter.onComplete();
            } catch (InterruptedException e) {
                // Restore the interrupt status before surfacing the failure.
                Thread.currentThread().interrupt();
                emitter.onError(e);
            } catch (Exception e) {
                emitter.onError(e);
            }
        }, BackpressureStrategy.BUFFER);
    }

    /**
     * Reads the entire audio file into memory.
     *
     * @throws IOException if the file cannot be read
     */
    private byte[] readAudioFile(File audioFile) throws IOException {
        return Files.readAllBytes(audioFile.toPath());
    }

    /**
     * Generates one second of silent PCM test data (16 kHz, 16-bit, mono).
     */
    private byte[] generateMockAudioData() {
        int durationMs = 1000;
        int numSamples = SAMPLE_RATE * durationMs / 1000;
        // Java arrays are zero-initialized, so a fresh array is already silence.
        return new byte[numSamples * 2]; // 16-bit mono => 2 bytes per sample
    }
}