package com.voicecomm.audio;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;

/**
 * 音频处理器
 * 负责音频数据的格式转换、降噪、分帧等处理
 * 
 * @author VoiceComm Team
 * @version 1.0.0
 */
public class AudioProcessor {
    
    private static final Logger logger = LoggerFactory.getLogger(AudioProcessor.class);
    
    // Target format for speech recognition: 16 kHz, 16-bit, mono, little-endian signed PCM.
    private static final AudioFormat TARGET_FORMAT = new AudioFormat(
        AudioFormat.Encoding.PCM_SIGNED,
        16000.0f,    // 16 kHz sample rate
        16,          // 16-bit samples
        1,           // mono
        2,           // frame size in bytes
        16000.0f,    // 16 kHz frame rate
        false        // little-endian
    );
    
    // Width of one 16-bit PCM sample in bytes (PCM16LE throughout this class).
    private static final int BYTES_PER_SAMPLE = 2;
    
    /**
     * Decodes the little-endian 16-bit sample starting at byte offset {@code i}.
     * The low byte must be masked to avoid sign extension before the OR.
     */
    private static short readSampleLe(byte[] data, int i) {
        return (short) ((data[i + 1] << 8) | (data[i] & 0xFF));
    }
    
    /**
     * Encodes the low 16 bits of {@code sample} as little-endian bytes at offset {@code i}.
     */
    private static void writeSampleLe(byte[] data, int i, int sample) {
        data[i] = (byte) (sample & 0xFF);
        data[i + 1] = (byte) ((sample >> 8) & 0xFF);
    }
    
    /**
     * Converts raw audio bytes from the given source format to the recognizer's
     * target format (16 kHz / 16-bit / mono / little-endian PCM).
     *
     * @param audioData raw audio bytes in {@code sourceFormat}
     * @param sourceFormat format describing {@code audioData}
     * @return converted audio bytes; an empty array for null/empty input
     * @throws IOException if reading fails or the conversion is not supported
     */
    public byte[] convertAudioFormat(byte[] audioData, AudioFormat sourceFormat) throws IOException {
        if (audioData == null || audioData.length == 0) {
            return new byte[0];
        }
        
        try (ByteArrayInputStream bais = new ByteArrayInputStream(audioData);
             AudioInputStream sourceStream = new AudioInputStream(
                     bais, sourceFormat, audioData.length / sourceFormat.getFrameSize());
             AudioInputStream targetStream = AudioSystem.getAudioInputStream(TARGET_FORMAT, sourceStream);
             ByteArrayOutputStream baos = new ByteArrayOutputStream()) {
            
            byte[] buffer = new byte[4096];
            int bytesRead;
            while ((bytesRead = targetStream.read(buffer)) != -1) {
                baos.write(buffer, 0, bytesRead);
            }
            
            byte[] convertedData = baos.toByteArray();
            logger.debug("音频格式转换完成，原始大小: {} bytes, 转换后大小: {} bytes", 
                        audioData.length, convertedData.length);
            return convertedData;
            
        } catch (IOException e) {
            // Propagate I/O failures as-is rather than double-wrapping them in
            // another IOException (the previous catch (Exception e) did that).
            logger.error("音频格式转换失败", e);
            throw e;
        } catch (RuntimeException e) {
            // AudioSystem.getAudioInputStream throws IllegalArgumentException for
            // unsupported conversions; surface it under the declared IOException contract.
            logger.error("音频格式转换失败", e);
            throw new IOException("音频格式转换失败", e);
        }
    }
    
    /**
     * Splits audio data into fixed-size frames; the final frame may be shorter
     * when the data length is not a multiple of {@code frameSize}.
     *
     * @param audioData audio bytes to split
     * @param frameSize frame size in bytes; must be positive
     * @return frames covering the input in order; empty array for null/empty input
     * @throws IllegalArgumentException if {@code frameSize <= 0}
     */
    public byte[][] splitIntoFrames(byte[] audioData, int frameSize) {
        if (audioData == null || audioData.length == 0) {
            return new byte[0][];
        }
        if (frameSize <= 0) {
            // Previously frameSize == 0 surfaced as ArithmeticException and a
            // negative value as NegativeArraySizeException; fail fast instead.
            throw new IllegalArgumentException("frameSize must be positive: " + frameSize);
        }
        
        int frameCount = (audioData.length + frameSize - 1) / frameSize; // ceiling division
        byte[][] frames = new byte[frameCount][];
        
        for (int i = 0; i < frameCount; i++) {
            int start = i * frameSize;
            int length = Math.min(frameSize, audioData.length - start);
            frames[i] = new byte[length];
            System.arraycopy(audioData, start, frames[i], 0, length);
        }
        
        logger.debug("音频分帧完成，总帧数: {}", frameCount);
        return frames;
    }
    
    /**
     * Applies simple threshold-based noise gating: any 16-bit sample whose
     * absolute value is below {@code threshold} is zeroed.
     *
     * @param audioData PCM16LE audio data (returned unchanged if null/empty)
     * @param threshold noise gate threshold on the 16-bit sample scale
     * @return a new array with gated samples (a trailing odd byte is copied as-is)
     */
    public byte[] applyNoiseReduction(byte[] audioData, double threshold) {
        if (audioData == null || audioData.length == 0) {
            return audioData;
        }
        
        byte[] processedData = new byte[audioData.length];
        System.arraycopy(audioData, 0, processedData, 0, audioData.length);
        
        for (int i = 0; i + 1 < processedData.length; i += BYTES_PER_SAMPLE) {
            short sample = readSampleLe(processedData, i);
            // Gate: samples below the threshold are treated as noise.
            if (Math.abs(sample) < threshold) {
                sample = 0;
            }
            writeSampleLe(processedData, i, sample);
        }
        
        logger.debug("降噪处理完成，阈值: {}", threshold);
        return processedData;
    }

    /**
     * Computes the RMS (root-mean-square) amplitude of 16-bit PCM audio.
     *
     * @param audioData PCM16LE data
     * @return RMS amplitude on the sample scale (0-32767); 0.0 for null/short input
     */
    public double computeRms(byte[] audioData) {
        if (audioData == null || audioData.length < BYTES_PER_SAMPLE) {
            return 0.0;
        }
        long sumSq = 0L;
        int samples = 0;
        for (int i = 0; i + 1 < audioData.length; i += BYTES_PER_SAMPLE) {
            long sample = readSampleLe(audioData, i);
            sumSq += sample * sample; // accumulate in long to avoid overflow
            samples++;
        }
        if (samples == 0) {
            return 0.0;
        }
        return Math.sqrt((double) sumSq / samples);
    }

    /**
     * Normalizes audio to a target RMS level, with gain clamping and per-sample
     * peak limiting to avoid clipping.
     *
     * @param audioData PCM16LE data (returned unchanged if null/short/invalid target)
     * @param targetRms target RMS on the sample scale (suggested 2000-5000)
     * @return normalized PCM16LE data
     */
    public byte[] normalizeToTargetRms(byte[] audioData, double targetRms) {
        if (audioData == null || audioData.length < BYTES_PER_SAMPLE || targetRms <= 0) {
            return audioData;
        }
        double rms = computeRms(audioData);
        if (rms <= 1e-3) {
            return audioData; // near-silence: do not amplify the noise floor
        }
        double gain = targetRms / rms;
        // Clamp the gain so normalization never over-amplifies or over-attenuates.
        if (gain > 8.0) {
            gain = 8.0;  // at most ~+18 dB
        }
        if (gain < 0.25) {
            gain = 0.25; // at most ~-12 dB
        }

        byte[] out = new byte[audioData.length];
        for (int i = 0; i + 1 < audioData.length; i += BYTES_PER_SAMPLE) {
            int scaled = (int) Math.round(readSampleLe(audioData, i) * gain);
            // Hard limiter: clamp to the 16-bit range to prevent wrap-around clipping.
            if (scaled > Short.MAX_VALUE) {
                scaled = Short.MAX_VALUE;
            }
            if (scaled < Short.MIN_VALUE) {
                scaled = Short.MIN_VALUE;
            }
            writeSampleLe(out, i, scaled);
        }
        return out;
    }

    /**
     * Speech enhancement pipeline: threshold noise gating followed by optional
     * RMS normalization.
     *
     * @param audioData PCM16LE data
     * @param noiseThreshold noise gate threshold (see {@link #applyNoiseReduction})
     * @param targetRms target RMS; 0 or negative skips normalization
     * @return enhanced PCM16LE data
     */
    public byte[] enhanceSpeech(byte[] audioData, double noiseThreshold, double targetRms) {
        byte[] denoised = applyNoiseReduction(audioData, noiseThreshold);
        if (targetRms > 0) {
            return normalizeToTargetRms(denoised, targetRms);
        }
        return denoised;
    }
    
    /**
     * Detects silent segments in 16-bit PCM audio. Positions and lengths in the
     * returned segments are BYTE offsets into {@code audioData}.
     *
     * <p>Note: the data is assumed to be in {@link #TARGET_FORMAT} (16 kHz mono
     * PCM16LE) when converting {@code minSilenceDuration} to a byte length.
     *
     * @param audioData PCM16LE data
     * @param silenceThreshold amplitude below which a sample counts as silent
     * @param minSilenceDuration minimum silence duration in milliseconds
     * @return detected silence segments in order; empty array for null/empty input
     */
    public SilenceSegment[] detectSilence(byte[] audioData, double silenceThreshold, int minSilenceDuration) {
        if (audioData == null || audioData.length == 0) {
            return new SilenceSegment[0];
        }
        
        java.util.List<SilenceSegment> silenceSegments = new java.util.ArrayList<>();
        boolean inSilence = false;
        int silenceStart = 0;
        int sampleRate = (int) TARGET_FORMAT.getSampleRate();
        // BUGFIX: segment lengths below are measured in BYTES, but the old code
        // compared them against a SAMPLE count, halving the effective minimum
        // duration. Convert the threshold to bytes so the units match.
        int minSilenceBytes = (minSilenceDuration * sampleRate / 1000) * BYTES_PER_SAMPLE;
        
        for (int i = 0; i + 1 < audioData.length; i += BYTES_PER_SAMPLE) {
            short sample = readSampleLe(audioData, i);
            boolean isSilent = Math.abs(sample) < silenceThreshold;
            
            if (isSilent && !inSilence) {
                // Silence starts here.
                inSilence = true;
                silenceStart = i;
            } else if (!isSilent && inSilence) {
                // Silence ended; keep it only if it was long enough.
                int silenceLength = i - silenceStart;
                if (silenceLength >= minSilenceBytes) {
                    silenceSegments.add(new SilenceSegment(silenceStart, silenceLength));
                }
                inSilence = false;
            }
        }
        
        // Handle a silence run that extends to the end of the data.
        if (inSilence) {
            int silenceLength = audioData.length - silenceStart;
            if (silenceLength >= minSilenceBytes) {
                silenceSegments.add(new SilenceSegment(silenceStart, silenceLength));
            }
        }
        
        logger.debug("检测到 {} 个静音段", silenceSegments.size());
        return silenceSegments.toArray(new SilenceSegment[0]);
    }
    
    /**
     * An immutable silence segment; {@code startPosition} and {@code length}
     * are byte offsets/lengths into the analyzed audio buffer.
     */
    public static class SilenceSegment {
        private final int startPosition;
        private final int length;
        
        public SilenceSegment(int startPosition, int length) {
            this.startPosition = startPosition;
            this.length = length;
        }
        
        /** @return start of the segment (byte offset) */
        public int getStartPosition() {
            return startPosition;
        }
        
        /** @return segment length in bytes */
        public int getLength() {
            return length;
        }
        
        /** @return exclusive end of the segment (byte offset) */
        public int getEndPosition() {
            return startPosition + length;
        }
        
        @Override
        public String toString() {
            return String.format("SilenceSegment{start=%d, length=%d, end=%d}", 
                               startPosition, length, getEndPosition());
        }
    }
}
