package com.example.roleplay.service.tts;

import com.example.roleplay.config.RoleplayProperties;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;
import java.time.Duration;
import java.util.Base64;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.http.*;
import org.springframework.stereotype.Service;
import org.springframework.web.client.RestTemplate;
import reactor.core.publisher.Flux;

/**
 * Zhipu AI CogTTS text-to-speech client backed by the HTTP API.
 *
 * <p>Active only when {@code roleplay.provider.tts=cogtts}. CogTTS returns raw
 * PCM audio (24 kHz, mono, 16-bit); this client wraps that PCM in a WAV
 * container before returning it to callers.
 */
@Slf4j
@Service
@RequiredArgsConstructor
@ConditionalOnProperty(name = "roleplay.provider.tts", havingValue = "cogtts")
public class ZhipuTTSClient implements TTSClient {

    private final RoleplayProperties properties;
    private final RestTemplate restTemplate;
    private final ObjectMapper objectMapper;

    /** CogTTS API endpoint path, appended to the configured Zhipu base URL. */
    private static final String TTS_ENDPOINT = "/audio/speech";

    // Audio parameters of the PCM stream CogTTS returns; used to build the WAV header.
    private static final int SAMPLE_RATE = 24_000;   // 24 kHz
    private static final int CHANNELS = 1;           // mono
    private static final int BITS_PER_SAMPLE = 16;   // 16-bit samples

    /**
     * Synthesizes speech for {@code text} and returns WAV-encoded audio bytes.
     *
     * @param text     text to synthesize; must be non-null
     * @param voice    requested voice name or alias; may be null (falls back to the
     *                 default voice for {@code language})
     * @param language BCP-47-style language tag (e.g. {@code zh-CN})
     * @return WAV audio bytes, or null if the API returned an empty 2xx body
     * @throws RuntimeException if the HTTP call fails or returns a non-2xx status
     */
    @Override
    public byte[] synthesize(String text, String voice, String language) {
        try {
            log.info("CogTTS 合成语音 - 文本长度: {}, 声音: {}, 语言: {}", text.length(), voice, language);

            String url = properties.getZhipu().getBaseUrl() + TTS_ENDPOINT;
            log.info("CogTTS API URL: {}", url);

            Map<String, Object> requestBody = createTTSRequest(text, voice, language, false);
            HttpEntity<Map<String, Object>> requestEntity = new HttpEntity<>(requestBody, authHeaders());

            log.debug("发送CogTTS请求: URL={}, 文本长度={}", url, text.length());
            // Deliberately NOT logging the headers: they carry the Authorization
            // bearer token (API key) and must never end up in log files.
            log.debug("请求体: {}", requestBody);

            ResponseEntity<byte[]> response = restTemplate.postForEntity(url, requestEntity, byte[].class);

            if (response.getStatusCode().is2xxSuccessful()) {
                byte[] audioData = response.getBody();
                log.info("CogTTS 合成成功: {} bytes", audioData != null ? audioData.length : 0);

                // CogTTS returns raw PCM; wrap it in a WAV container for playback.
                if (audioData != null) {
                    audioData = convertPcmToWav(audioData);
                    log.info("转换为WAV格式后: {} bytes", audioData.length);
                }

                return audioData;
            } else {
                String errorMsg = "CogTTS 合成失败: HTTP " + response.getStatusCode();
                log.error(errorMsg);
                throw new RuntimeException(errorMsg);
            }

        } catch (Exception e) {
            log.error("CogTTS API调用失败", e);
            throw new RuntimeException("CogTTS API调用失败: " + e.getMessage(), e);
        }
    }

    /**
     * Streaming synthesis: emits audio chunks decoded from the API's SSE response.
     *
     * <p>Implementation note: the HTTP call is made with the blocking
     * {@link RestTemplate} inside {@code Flux.create}, so the whole response is
     * buffered and then re-emitted chunk by chunk with an artificial 100 ms delay.
     * A true streaming implementation should use WebClient/HttpClient SSE.
     *
     * @return a Flux of raw audio byte chunks; errors are signalled via the Flux
     */
    @Override
    public Flux<byte[]> synthesizeStream(String text, String voice, String language) {
        log.info("CogTTS 流式合成 - 文本长度: {}, 声音: {}, 语言: {}", text.length(), voice, language);

        return Flux.<byte[]>create(sink -> {
            try {
                String url = properties.getZhipu().getBaseUrl() + TTS_ENDPOINT;
                log.info("CogTTS 流式 API URL: {}", url);

                Map<String, Object> requestBody = createTTSRequest(text, voice, language, true);
                HttpEntity<Map<String, Object>> requestEntity = new HttpEntity<>(requestBody, authHeaders());

                log.debug("发送CogTTS流式请求: URL={}, 文本长度={}", url, text.length());
                // Headers intentionally not logged — they contain the API key.
                log.debug("流式请求体: {}", requestBody);

                ResponseEntity<String> response = restTemplate.postForEntity(url, requestEntity, String.class);

                if (response.getStatusCode().is2xxSuccessful()) {
                    parseStreamResponse(response.getBody(), sink);
                } else {
                    sink.error(new RuntimeException("CogTTS 流式合成失败: HTTP " + response.getStatusCode()));
                }

            } catch (Exception e) {
                log.error("CogTTS 流式合成失败", e);
                sink.error(e);
            }
        }).delayElements(Duration.ofMillis(100)); // pace the buffered chunks to simulate streaming
    }

    /**
     * Lightweight health check: verifies that an API key is configured.
     * Does not make a network call.
     */
    @Override
    public boolean isHealthy() {
        try {
            String apiKey = properties.getZhipu().getApiKey();
            return apiKey != null && !apiKey.isEmpty();
        } catch (Exception e) {
            log.warn("CogTTS 健康检查失败", e);
            return false;
        }
    }

    /** @return the voice names supported by Zhipu AI CogTTS */
    @Override
    public String[] getSupportedVoices() {
        return new String[]{"tongtong", "chuichui", "xiaochen", "jam", "kazi", "douji", "luodo"};
    }

    /** @return the language tags this client accepts */
    @Override
    public String[] getSupportedLanguages() {
        return new String[]{"zh-CN", "en-US", "zh-TW", "ja-JP", "ko-KR"};
    }

    /**
     * Returns the default voice for a language. All supported languages currently
     * map to the same default CogTTS voice, {@code "tongtong"}.
     */
    @Override
    public String getDefaultVoice(String language) {
        return "tongtong";
    }

    /** Builds JSON request headers carrying the configured API key as a bearer token. */
    private HttpHeaders authHeaders() {
        HttpHeaders headers = new HttpHeaders();
        headers.setContentType(MediaType.APPLICATION_JSON);
        headers.setBearerAuth(properties.getZhipu().getApiKey());
        return headers;
    }

    /**
     * Builds the CogTTS request body per the Zhipu AI API format.
     *
     * <p>NOTE(review): the {@code stream} flag is accepted but not forwarded to
     * the API — confirm whether CogTTS expects a {@code "stream": true} field
     * for SSE responses; today both paths send an identical body.
     */
    private Map<String, Object> createTTSRequest(String text, String voice, String language, boolean stream) {
        Map<String, Object> requestBody = new HashMap<>();

        requestBody.put("model", "cogtts"); // official CogTTS model name
        requestBody.put("input", text);

        // Resolve the voice: explicit request wins, otherwise the language default,
        // then normalize aliases/numeric IDs to a CogTTS voice name.
        String selectedVoice = voice != null ? voice : getDefaultVoice(language);
        requestBody.put("voice", convertVoiceToName(selectedVoice));

        log.debug("CogTTS请求参数: {}", requestBody);
        return requestBody;
    }

    /**
     * Normalizes a voice identifier (native name, numeric ID, OpenAI-style alias,
     * or Chinese display name) to a CogTTS voice name. Unknown values fall back
     * to {@code "tongtong"}.
     */
    private String convertVoiceToName(String voice) {
        // Already a native CogTTS voice name? Return it unchanged.
        for (String supported : getSupportedVoices()) {
            if (supported.equals(voice)) {
                return voice;
            }
        }

        // Map common aliases and numeric IDs to CogTTS voice names.
        // Locale.ROOT keeps case folding locale-independent (e.g. Turkish 'I').
        return switch (voice.toLowerCase(Locale.ROOT)) {
            case "1", "alloy", "female", "彤彤" -> "tongtong";
            case "2", "echo", "male", "小陈" -> "xiaochen";
            case "3", "fable", "锤锤" -> "chuichui";
            case "4", "onyx", "jam" -> "jam";
            case "5", "nova", "kazi" -> "kazi";
            case "6", "shimmer", "douji" -> "douji";
            case "7", "luodo" -> "luodo";
            default -> "tongtong"; // fall back to the default voice
        };
    }

    /**
     * Parses an SSE-formatted CogTTS response and pushes base64-decoded audio
     * chunks into {@code sink}. Completes the sink on {@code [DONE]}, a
     * {@code finish_reason == "stop"} choice, or end of input; signals errors
     * from the payload's {@code error} field. Malformed data lines are logged
     * and skipped so one bad event does not kill the stream.
     */
    private void parseStreamResponse(String responseBody, reactor.core.publisher.FluxSink<byte[]> sink) {
        if (responseBody == null || responseBody.isEmpty()) {
            sink.complete();
            return;
        }

        try {
            // Split on \r?\n so CRLF-terminated SSE lines don't keep a stray '\r'.
            String[] lines = responseBody.split("\\r?\\n");

            for (String line : lines) {
                if (!line.startsWith("data: ")) {
                    continue;
                }
                String dataContent = line.substring(6); // strip the "data: " prefix

                if (dataContent.trim().equals("[DONE]")) {
                    // End-of-stream sentinel.
                    sink.complete();
                    return;
                }

                try {
                    JsonNode jsonData = objectMapper.readTree(dataContent);

                    // Error payloads short-circuit the whole stream.
                    if (jsonData.has("error")) {
                        String errorMsg = jsonData.path("error").path("message").asText();
                        sink.error(new RuntimeException("CogTTS 流式合成错误: " + errorMsg));
                        return;
                    }

                    if (jsonData.has("choices")) {
                        JsonNode choices = jsonData.path("choices");
                        if (choices.isArray() && choices.size() > 0) {
                            JsonNode firstChoice = choices.get(0);

                            // Completion marker inside a choice.
                            if (firstChoice.has("finish_reason")
                                    && "stop".equals(firstChoice.path("finish_reason").asText())) {
                                sink.complete();
                                return;
                            }

                            // Audio arrives base64-encoded in delta.content.
                            if (firstChoice.has("delta")) {
                                JsonNode delta = firstChoice.path("delta");
                                if (delta.has("content")) {
                                    String base64Audio = delta.path("content").asText();
                                    if (!base64Audio.isEmpty()) {
                                        sink.next(Base64.getDecoder().decode(base64Audio));
                                    }
                                }
                            }
                        }
                    }
                } catch (Exception e) {
                    // Skip the malformed event; keep processing the rest of the stream.
                    log.warn("解析流式数据失败: {}", dataContent, e);
                }
            }

            // No explicit end marker seen — still complete the stream.
            sink.complete();

        } catch (Exception e) {
            log.error("解析流式响应失败", e);
            sink.error(e);
        }
    }

    /**
     * Wraps raw PCM audio in a WAV container (CogTTS returns header-less PCM).
     * On an (unlikely) in-memory I/O failure, falls back to returning the raw
     * PCM bytes unchanged rather than failing the synthesis.
     */
    private byte[] convertPcmToWav(byte[] pcmData) {
        try {
            ByteArrayOutputStream wavStream = new ByteArrayOutputStream(44 + pcmData.length);
            writeWavHeader(wavStream, pcmData.length, SAMPLE_RATE, CHANNELS, BITS_PER_SAMPLE);
            wavStream.write(pcmData);
            return wavStream.toByteArray();
        } catch (IOException e) {
            log.error("PCM转WAV失败", e);
            return pcmData; // best effort: hand back the raw PCM
        }
    }

    /**
     * Writes a canonical 44-byte little-endian RIFF/WAVE header for PCM data.
     *
     * @param pcmDataLength size of the PCM payload in bytes (the "data" chunk size)
     */
    private void writeWavHeader(ByteArrayOutputStream stream, int pcmDataLength,
                               int sampleRate, int channels, int bitsPerSample) throws IOException {

        int byteRate = sampleRate * channels * bitsPerSample / 8;
        int blockAlign = channels * bitsPerSample / 8;
        int dataChunkSize = pcmDataLength;
        int fileSize = 36 + dataChunkSize; // RIFF chunk size = header remainder + data

        ByteBuffer buffer = ByteBuffer.allocate(44);
        buffer.order(ByteOrder.LITTLE_ENDIAN);

        // RIFF header. Chunk IDs must be ASCII regardless of the JVM's default
        // charset, hence the explicit StandardCharsets.US_ASCII.
        buffer.put("RIFF".getBytes(StandardCharsets.US_ASCII));
        buffer.putInt(fileSize);
        buffer.put("WAVE".getBytes(StandardCharsets.US_ASCII));

        // "fmt " sub-chunk.
        buffer.put("fmt ".getBytes(StandardCharsets.US_ASCII));
        buffer.putInt(16);           // fmt sub-chunk size for PCM
        buffer.putShort((short) 1);  // audio format 1 = uncompressed PCM
        buffer.putShort((short) channels);
        buffer.putInt(sampleRate);
        buffer.putInt(byteRate);
        buffer.putShort((short) blockAlign);
        buffer.putShort((short) bitsPerSample);

        // "data" sub-chunk.
        buffer.put("data".getBytes(StandardCharsets.US_ASCII));
        buffer.putInt(dataChunkSize);

        stream.write(buffer.array());
    }
}