package com.example.roleplay.controller;


import com.example.roleplay.domain.KnowledgeSnippet;
import com.example.roleplay.domain.SessionMemory;
import com.example.roleplay.domain.Skill;
import com.example.roleplay.service.AIEnhancementService;
import com.example.roleplay.service.KnowledgeService;
import com.example.roleplay.service.SessionService;
import com.example.roleplay.service.asr.ASRClient;
import com.example.roleplay.service.llm.LLMClient;
import com.example.roleplay.service.tts.TTSClient;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.http.HttpHeaders;
import org.springframework.http.MediaType;
import org.springframework.http.ResponseEntity;
import org.springframework.web.bind.annotation.*;
import org.springframework.web.multipart.MultipartFile;

import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.stream.Collectors;

/**
 * 语音对话控制器
 * 实现完整的语音输入->语音输出流程
 */
@Slf4j
@RestController
@RequestMapping("/api/voice")
@RequiredArgsConstructor
public class VoiceChatController {

    private final ASRClient asrClient;
    private final LLMClient llmClient;
    private final TTSClient ttsClient;
    private final KnowledgeService knowledgeService;
    private final SessionService sessionService;
    private final AIEnhancementService aiEnhancementService;

    /**
     * 完整的语音对话：音频输入 -> 文本识别 -> LLM对话 -> 音频合成 -> 音频输出
     */
    @PostMapping(value = "/chat", consumes = MediaType.MULTIPART_FORM_DATA_VALUE, produces = "audio/wav")
    public ResponseEntity<byte[]> voiceChat(
            @RequestParam("audio") MultipartFile audioFile,
            @RequestParam(defaultValue = "socrates") String personaId,
            @RequestParam(required = false) String sessionId,
            @RequestParam(defaultValue = "zh-CN") String language,
            @RequestParam(required = false) String voice) {

        try {
            long startTime = System.currentTimeMillis();
            log.info("开始语音对话 - 角色: {}, 会话: {}, 语言: {}", personaId, sessionId, language);

            // 如果没有提供sessionId，创建一个新的
            if (sessionId == null || sessionId.trim().isEmpty()) {
                sessionId = UUID.randomUUID().toString();
                log.info("创建新的语音会话: {}", sessionId);
            }

            // 确保会话存在
            if (!sessionService.isSessionValid(sessionId)) {
                // 需要先创建 SessionCreateRequest
                var createRequest = new com.example.roleplay.dto.SessionCreateRequest();
                createRequest.setPersonaId(personaId);
                createRequest.setPreferredLanguage(language);
                var createResponse = sessionService.createSession(createRequest);
                sessionId = createResponse.getSessionId();
                log.info("创建新的语音会话: {}", sessionId);
            }

            // 步骤1: 语音识别 (ASR)
            log.info("步骤1: 开始语音识别...");
            String recognizedText = asrClient.transcribe(audioFile, language);
            log.info("语音识别结果: {}", recognizedText);

            if (recognizedText == null || recognizedText.trim().isEmpty()) {
                return ResponseEntity.badRequest()
                        .header("X-Error", "ASR_FAILED")
                        .build();
            }

            // 步骤2: AI增强上下文检索（包含本地知识库和网络搜索）
            log.info("步骤2: 获取AI增强上下文...");
            List<String> enhancedContexts;
            try {
                enhancedContexts = aiEnhancementService.getEnhancedContext(recognizedText, 5);
                log.info("获取到 {} 个增强上下文", enhancedContexts.size());
                
                // 添加详细日志查看上下文内容
                for (int i = 0; i < enhancedContexts.size(); i++) {
                    String context = enhancedContexts.get(i);
                    log.debug("上下文{}: 内容预览={}", 
                             i+1, context.length() > 100 ? 
                             context.substring(0, 100) + "..." : context);
                }
            } catch (Exception e) {
                log.warn("AI增强上下文获取失败，尝试本地知识库检索: {}", e.getMessage());
                // 降级到本地知识库检索
                try {
                    List<KnowledgeSnippet> knowledgeSnippets = knowledgeService.searchSnippets(recognizedText, 3);
                    enhancedContexts = knowledgeSnippets.stream()
                            .map(snippet -> String.format("%s (%s)", snippet.getContent(), snippet.getCitationFormat()))
                            .collect(Collectors.toList());
                    log.info("降级检索到 {} 个知识片段", enhancedContexts.size());
                    
                    // 添加详细日志查看片段内容
                    for (int i = 0; i < knowledgeSnippets.size(); i++) {
                        KnowledgeSnippet snippet = knowledgeSnippets.get(i);
                        log.debug("片段{}: 标题={}, 内容预览={}", 
                                 i+1, snippet.getTitle(), 
                                 snippet.getContent().length() > 50 ? 
                                 snippet.getContent().substring(0, 50) + "..." : snippet.getContent());
                    }
                } catch (Exception fallbackE) {
                    log.warn("本地知识库检索也失败，使用无上下文对话: {}", fallbackE.getMessage());
                    enhancedContexts = List.of();
                }
            }

            // 步骤3: LLM对话生成
            log.info("步骤3: 生成LLM回复...");
            String llmResponse = llmClient.chat(personaId, sessionId, recognizedText, enhancedContexts);
            log.info("LLM回复: {}", llmResponse);

            // 更新会话记忆
            sessionService.updateSessionMemory(sessionId, recognizedText, llmResponse,
                    SessionMemory.MessageType.VOICE, List.of(Skill.MEMORY, Skill.CITATION));

            // 步骤4: 语音合成 (TTS)
            log.info("步骤4: 合成语音回复...");
            byte[] audioResponse = ttsClient.synthesize(llmResponse, voice, language);
            log.info("语音合成完成，音频大小: {} bytes", audioResponse.length);

            // 准备响应
            HttpHeaders headers = new HttpHeaders();
            // 智谱TTS返回PCM音频数据，设置正确的Content-Type
            headers.setContentType(MediaType.valueOf("audio/wav"));
            headers.setContentLength(audioResponse.length);
            headers.set("Content-Disposition", "inline; filename=\"voice-response.wav\"");
            headers.set("X-Session-Id", sessionId);
            // 使用Base64编码中文内容避免HTTP头编码问题
            headers.set("X-Recognized-Text-Base64", 
                java.util.Base64.getEncoder().encodeToString(recognizedText.getBytes(java.nio.charset.StandardCharsets.UTF_8)));
            headers.set("X-Response-Text-Base64", 
                java.util.Base64.getEncoder().encodeToString(llmResponse.getBytes(java.nio.charset.StandardCharsets.UTF_8)));
            headers.set("X-Processing-Time", String.valueOf(System.currentTimeMillis() - startTime));

            log.info("语音对话完成 - 会话: {}, 总耗时: {}ms", sessionId, System.currentTimeMillis() - startTime);

            return ResponseEntity.ok()
                    .headers(headers)
                    .body(audioResponse);

        } catch (Exception e) {
            log.error("语音对话失败", e);
            return ResponseEntity.internalServerError()
                    .header("X-Error", "VOICE_CHAT_FAILED")
                    .build();
        }
    }

    /**
     * 仅语音识别
     */
    @PostMapping(value = "/transcribe", consumes = MediaType.MULTIPART_FORM_DATA_VALUE)
    public ResponseEntity<String> transcribeOnly(
            @RequestParam("audio") MultipartFile audioFile,
            @RequestParam(defaultValue = "zh-CN") String language) {

        try {
            String text = asrClient.transcribe(audioFile, language);
            return ResponseEntity.ok(text);
        } catch (Exception e) {
            log.error("语音识别失败", e);
            return ResponseEntity.internalServerError()
                    .body("ASR recognition failed: " + e.getMessage());
        }
    }

    /**
     * 仅语音合成
     */
    @GetMapping(value = "/synthesize", produces = "audio/wav")
    public ResponseEntity<byte[]> synthesizeOnly(
            @RequestParam String text,
            @RequestParam(required = false) String voice,
            @RequestParam(defaultValue = "zh-CN") String language) {

        try {
            if (text.length() > 1000) {
                text = text.substring(0, 1000);
            }

            byte[] audioData = ttsClient.synthesize(text, voice, language);

            HttpHeaders headers = new HttpHeaders();
            headers.setContentType(MediaType.valueOf("audio/wav"));
            headers.setContentLength(audioData.length);
            headers.set("Content-Disposition", "inline; filename=\"synthesized.wav\"");

            return ResponseEntity.ok()
                    .headers(headers)
                    .body(audioData);

        } catch (Exception e) {
            log.error("语音合成失败", e);
            return ResponseEntity.internalServerError()
                    .header("X-Error", "TTS_SYNTHESIS_FAILED")
                    .build();
        }
    }

    /**
     * 获取支持的语音和语言
     */
    @GetMapping("/capabilities")
    public ResponseEntity<Object> getCapabilities() {
        try {
            return ResponseEntity.ok(new Object() {
                public final String[] supportedLanguages = asrClient.getSupportedLanguages();
                public final String[] supportedVoices = ttsClient.getSupportedVoices();
                public final String[] supportedAudioFormats = asrClient.getSupportedFormats();
                public final boolean asrHealthy = asrClient.isHealthy();
                public final boolean ttsHealthy = ttsClient.isHealthy();
            });
        } catch (Exception e) {
            log.error("获取语音能力信息失败", e);
            return ResponseEntity.internalServerError().build();
        }
    }
}