package com.cardcaptorsakura.service.output;

import com.cardcaptorsakura.model.dto.VoiceSynthesisRequest;
import com.cardcaptorsakura.model.dto.VoiceSynthesisResponse;
import com.cardcaptorsakura.model.entity.PersonaConfig;
import com.cardcaptorsakura.service.persona.PersonaService;
import com.cardcaptorsakura.config.AIConfig;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.springframework.ai.audio.speech.SpeechClient;
import org.springframework.ai.audio.speech.SpeechPrompt;
import org.springframework.ai.audio.speech.SpeechResponse;
import org.springframework.ai.audio.speech.SpeechOptions;
import org.springframework.stereotype.Service;
import org.springframework.util.StringUtils;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;

/**
 * Voice synthesis service.
 *
 * <p>Converts text to speech via the configured {@link SpeechClient}: picks a voice matching the
 * user's active persona, lightly post-processes the text for prosody, calls the speech API, and
 * persists the generated audio under {@code uploads/generated-audio/<userId>/}.</p>
 */
@Slf4j
@Service
@RequiredArgsConstructor
public class VoiceSynthesisService {

    private final SpeechClient speechClient;
    private final PersonaService personaService;
    private final AIConfig aiConfig;

    private static final String AUDIO_STORAGE_PATH = "uploads/generated-audio/";
    private static final String DEFAULT_VOICE = "alloy";
    private static final String DEFAULT_FORMAT = "mp3";
    private static final double DEFAULT_SPEED = 1.0;
    // A persona trait above this level is treated as dominant when picking voice/prosody.
    private static final double TRAIT_THRESHOLD = 0.7;

    /**
     * Synchronous voice synthesis.
     *
     * @param request the synthesis request (text, user id, optional voice/format/speed)
     * @return the synthesis result, including the stored audio path and an estimated duration
     * @throws RuntimeException if persona lookup, the speech API call, or file storage fails
     */
    public VoiceSynthesisResponse synthesizeVoice(VoiceSynthesisRequest request) {
        try {
            // 1. Resolve the user's currently active persona.
            PersonaConfig persona = personaService.getActivePersona(request.getUserId());

            // 2. Pick a voice (an explicitly requested voice wins over the persona default).
            String selectedVoice = selectVoiceForPersona(persona, request.getVoice());

            // 3. Adjust the text for the persona (pauses, whitespace cleanup).
            String processedText = preprocessText(request.getText(), persona);

            // 4. Build speech options (voice, speed, output format).
            SpeechOptions options = buildSpeechOptions(request, selectedVoice);

            // 5. Call the speech-synthesis API.
            SpeechPrompt prompt = new SpeechPrompt(processedText, options);
            SpeechResponse response = speechClient.call(prompt);

            // 6. Persist the generated audio and record where it landed.
            byte[] audioData = response.getResult().getOutput();
            String savedAudioPath = saveAudioFile(audioData, request.getUserId(), request.getFormat());

            log.info("Synthesized voice for user: {}, persona: {}, voice: {}, text length: {}",
                    request.getUserId(), persona.getId(), selectedVoice, processedText.length());

            return VoiceSynthesisResponse.builder()
                    .audioPath(savedAudioPath)
                    .voice(selectedVoice)
                    .format(request.getFormat())
                    .speed(request.getSpeed())
                    .duration(estimateAudioDuration(processedText, request.getSpeed()))
                    .text(processedText)
                    .originalText(request.getText())
                    .synthesizedAt(LocalDateTime.now())
                    .build();

        } catch (Exception e) {
            log.error("Error synthesizing voice for user: {}", request.getUserId(), e);
            throw new RuntimeException("Failed to synthesize voice: " + e.getMessage(), e);
        }
    }

    /**
     * Asynchronous voice synthesis.
     *
     * <p>NOTE(review): runs on {@code ForkJoinPool.commonPool()}; synthesis blocks on network and
     * disk I/O, so consider passing a dedicated executor if call volume grows.</p>
     *
     * @param request the synthesis request
     * @return a future completing with the synthesis result (or exceptionally on failure)
     */
    public CompletableFuture<VoiceSynthesisResponse> synthesizeVoiceAsync(VoiceSynthesisRequest request) {
        return CompletableFuture.supplyAsync(() -> synthesizeVoice(request));
    }

    /**
     * Chooses a voice for the persona.
     *
     * <p>Priority: the caller's explicit voice, then a fixed per-persona mapping, then a
     * trait-derived choice, then {@link #DEFAULT_VOICE}.</p>
     *
     * @param persona        the active persona configuration
     * @param requestedVoice the voice explicitly requested by the caller, may be blank
     * @return the voice identifier to use
     */
    private String selectVoiceForPersona(PersonaConfig persona, String requestedVoice) {
        // An explicitly requested voice always wins.
        if (StringUtils.hasText(requestedVoice)) {
            return requestedVoice;
        }

        // Fixed mapping for well-known persona ids.
        switch (persona.getId()) {
            case "doctor":
                return "nova";    // professional, authoritative
            case "chef":
                return "shimmer"; // warm, friendly
            case "companion":
                return "alloy";   // gentle, companionable
            default:
                // No fixed mapping: derive the voice from dominant persona traits.
                Double empathyLevel = getTraitLevel(persona.getTraits(), "empathyLevel");
                if (empathyLevel != null && empathyLevel > TRAIT_THRESHOLD) {
                    return "alloy"; // high empathy -> gentle voice
                }
                Double humorLevel = getTraitLevel(persona.getTraits(), "humorLevel");
                if (humorLevel != null && humorLevel > TRAIT_THRESHOLD) {
                    return "echo"; // high humor -> lively voice
                }
                return DEFAULT_VOICE;
        }
    }

    /**
     * Reads a numeric trait level from the persona trait map.
     *
     * <p>Tolerates any {@link Number} subtype (e.g. {@code Integer} or {@code BigDecimal} from
     * JSON deserialization) — the previous blind {@code (Double)} cast threw
     * {@link ClassCastException} for non-{@code Double} numbers.</p>
     *
     * @param traits the trait map, may be {@code null}
     * @param key    the trait key
     * @return the trait value as a {@code Double}, or {@code null} if absent or not numeric
     */
    private static Double getTraitLevel(Map<String, Object> traits, String key) {
        Object value = traits != null ? traits.get(key) : null;
        return value instanceof Number ? ((Number) value).doubleValue() : null;
    }

    /**
     * Prepares text for synthesis based on persona traits.
     *
     * <p>High-humor personas get a pause after sentence-ending punctuation (to land comedic
     * beats); high-empathy personas get a pause after clause punctuation (softer pacing).
     * Whitespace runs are then collapsed to single spaces.</p>
     *
     * @param text    the raw text, may be blank
     * @param persona the active persona configuration
     * @return the processed text, or an empty string if the input was blank
     */
    private String preprocessText(String text, PersonaConfig persona) {
        if (!StringUtils.hasText(text)) {
            return "";
        }

        String processed = text;
        Map<String, Object> traits = persona.getTraits();
        if (traits != null) {
            Double humorLevel = getTraitLevel(traits, "humorLevel");
            if (humorLevel != null && humorLevel > TRAIT_THRESHOLD) {
                // Pause after sentence-ending punctuation to strengthen the humorous effect.
                processed = processed.replaceAll("([。！？])", "$1 ");
            }

            Double empathyLevel = getTraitLevel(traits, "empathyLevel");
            if (empathyLevel != null && empathyLevel > TRAIT_THRESHOLD) {
                // Pause after clause punctuation for a gentler, more empathetic pace.
                processed = processed.replaceAll("([，；])", "$1 ");
            }
        }

        // Collapse whitespace runs introduced above (and any already in the input).
        return processed.replaceAll("\\s+", " ").trim();
    }

    /**
     * Builds the speech options, falling back to defaults for unset speed/format.
     *
     * @param request the synthesis request
     * @param voice   the resolved voice identifier
     * @return the configured speech options
     */
    private SpeechOptions buildSpeechOptions(VoiceSynthesisRequest request, String voice) {
        return SpeechOptions.builder()
                .withVoice(voice)
                .withSpeed(request.getSpeed() != null ? request.getSpeed() : DEFAULT_SPEED)
                .withResponseFormat(request.getFormat() != null ? request.getFormat() : DEFAULT_FORMAT)
                .build();
    }

    /**
     * Saves synthesized audio under the per-user storage directory.
     *
     * @param audioData the raw audio bytes
     * @param userId    the owning user's id (used as a directory name)
     * @param format    the audio format / file extension, or {@code null} for the default
     * @return the path of the written file (relative, under {@link #AUDIO_STORAGE_PATH})
     * @throws IOException              if the directory or file cannot be written
     * @throws IllegalArgumentException if there is no audio data, or {@code userId} would
     *                                  escape the storage root
     */
    private String saveAudioFile(byte[] audioData, String userId, String format) throws IOException {
        if (audioData == null || audioData.length == 0) {
            throw new IllegalArgumentException("No audio data to save");
        }

        // Path-traversal guard: a crafted userId (e.g. "../../etc") must not escape the base
        // directory. Paths stay relative so the returned value keeps its original form.
        Path baseDir = Paths.get(AUDIO_STORAGE_PATH).normalize();
        Path storageDir = baseDir.resolve(userId).normalize();
        if (!storageDir.startsWith(baseDir)) {
            throw new IllegalArgumentException("Invalid userId for audio storage: " + userId);
        }
        Files.createDirectories(storageDir);

        // Timestamp + random suffix keeps filenames unique even within the same second.
        String timestamp = LocalDateTime.now().format(DateTimeFormatter.ofPattern("yyyyMMdd_HHmmss"));
        String fileExtension = format != null ? format : DEFAULT_FORMAT;
        String filename = String.format("voice_%s_%s.%s", timestamp,
                UUID.randomUUID().toString().substring(0, 8), fileExtension);
        Path filePath = storageDir.resolve(filename);

        Files.write(filePath, audioData);

        log.info("Saved synthesized audio to: {}", filePath.toString());
        return filePath.toString();
    }

    /**
     * Estimates audio duration in seconds from character count.
     *
     * <p>Uses a rough rate of ~200 characters per minute (assumes predominantly Chinese text),
     * scaled by playback speed.</p>
     *
     * @param text  the synthesized text
     * @param speed the playback speed, or {@code null} for the default
     * @return the estimated duration in seconds (0.0 for blank text)
     */
    private double estimateAudioDuration(String text, Double speed) {
        if (!StringUtils.hasText(text)) {
            return 0.0;
        }

        // charCount / 200 chars-per-minute, converted to seconds.
        double baseDuration = (text.length() / 200.0) * 60;

        // Guard against zero/negative speed, which would yield Infinity or a negative duration.
        double actualSpeed = (speed != null && speed > 0) ? speed : DEFAULT_SPEED;
        return baseDuration / actualSpeed;
    }

    /**
     * Batch voice synthesis.
     *
     * <p>NOTE(review): {@code parallelStream()} runs blocking network/disk I/O on the common
     * fork-join pool; acceptable for small batches, but a bounded dedicated executor would be
     * safer under load.</p>
     *
     * @param requests the synthesis requests
     * @return the synthesis results, in request order
     */
    public java.util.List<VoiceSynthesisResponse> batchSynthesize(
            java.util.List<VoiceSynthesisRequest> requests) {

        return requests.parallelStream()
                .map(this::synthesizeVoice)
                .collect(java.util.stream.Collectors.toList());
    }

    /**
     * Lists the supported voice identifiers.
     *
     * @return the supported voices (fixed-size list)
     */
    public java.util.List<String> getSupportedVoices() {
        return java.util.Arrays.asList(
                "alloy",    // neutral, balanced
                "echo",     // male, deep
                "fable",    // British, refined
                "onyx",     // male, deep
                "nova",     // female, youthful
                "shimmer"   // female, warm
        );
    }

    /**
     * Lists the supported audio output formats.
     *
     * @return the supported formats (fixed-size list)
     */
    public java.util.List<String> getSupportedFormats() {
        return java.util.Arrays.asList("mp3", "opus", "aac", "flac");
    }

    /**
     * Lists a user's generated audio files, newest first.
     *
     * @param userId the user whose history to list
     * @param limit  the maximum number of entries to return
     * @return file paths of generated audio, newest first; empty on error or if none exist
     */
    public java.util.List<String> getUserVoiceHistory(String userId, int limit) {
        try {
            Path userDir = Paths.get(AUDIO_STORAGE_PATH, userId);
            if (!Files.exists(userDir)) {
                return new java.util.ArrayList<>();
            }

            // Files.list is backed by an open directory handle and MUST be closed —
            // the previous code leaked it.
            try (java.util.stream.Stream<Path> entries = Files.list(userDir)) {
                return entries
                        .filter(Files::isRegularFile)
                        .filter(path -> {
                            String filename = path.getFileName().toString().toLowerCase();
                            return filename.endsWith(".mp3") || filename.endsWith(".opus") ||
                                   filename.endsWith(".aac") || filename.endsWith(".flac");
                        })
                        .sorted((p1, p2) -> {
                            try {
                                // Descending mtime: newest file first.
                                return Files.getLastModifiedTime(p2).compareTo(Files.getLastModifiedTime(p1));
                            } catch (IOException e) {
                                return 0; // treat unreadable timestamps as equal
                            }
                        })
                        .limit(limit)
                        .map(Path::toString)
                        .collect(java.util.stream.Collectors.toList());
            }

        } catch (Exception e) {
            log.error("Error retrieving voice history for user: {}", userId, e);
            return new java.util.ArrayList<>();
        }
    }

    /**
     * Deletes generated audio files older than the retention window.
     *
     * <p>Best-effort: failures to delete individual files are logged and skipped; empty user
     * directories are intentionally left in place.</p>
     *
     * @param daysToKeep files modified within this many days are retained
     */
    public void cleanupExpiredAudio(int daysToKeep) {
        try {
            Path audioDir = Paths.get(AUDIO_STORAGE_PATH);
            if (!Files.exists(audioDir)) {
                return;
            }

            long cutoffTime = System.currentTimeMillis() - (daysToKeep * 24L * 60 * 60 * 1000);

            // Files.walk is backed by open directory handles and MUST be closed —
            // the previous code leaked them.
            try (java.util.stream.Stream<Path> walk = Files.walk(audioDir)) {
                walk
                        .filter(Files::isRegularFile)
                        .filter(path -> {
                            try {
                                return Files.getLastModifiedTime(path).toMillis() < cutoffTime;
                            } catch (IOException e) {
                                return false; // unreadable timestamp: keep the file
                            }
                        })
                        .forEach(path -> {
                            try {
                                Files.delete(path);
                                log.info("Deleted expired audio file: {}", path);
                            } catch (IOException e) {
                                log.warn("Failed to delete expired audio file: {}", path, e);
                            }
                        });
            }

        } catch (Exception e) {
            log.error("Error cleaning up expired audio files", e);
        }
    }
}