package com.example.voiceassistant.audio;

import com.example.voiceassistant.config.ConfigManager;
import com.example.voiceassistant.monitoring.MonitoringService;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.sound.sampled.*;

import java.io.ByteArrayOutputStream;
import java.time.Duration;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

/**
 * Audio capture service backed by a real microphone via the Java Sound API.
 *
 * <p>Captures 16 kHz / 16-bit / mono / little-endian PCM. A scheduled task
 * periodically drains the microphone buffer, computes the RMS volume of each
 * chunk, and performs simple energy-based voice activity detection: chunks
 * louder than {@link #RMS_THRESHOLD} are accumulated into the current segment,
 * and once {@link #silenceThresholdMs} ms of continuous silence is observed
 * the segment is wrapped in a WAV header and dispatched to every registered
 * {@link AudioFrameListener}.
 *
 * <p>NOTE(review): the service is not restartable — {@link #stop()} terminates
 * the internal single-thread scheduler, so {@link #start()} must not be called
 * again on the same instance.
 */
public class AudioCaptureService {

    private static final Logger LOGGER = LoggerFactory.getLogger(AudioCaptureService.class);

    /** RMS amplitude above which a chunk is considered voiced. */
    private static final double RMS_THRESHOLD = 500;

    private final ConfigManager configManager;
    private final MonitoringService monitoringService;
    private final List<AudioFrameListener> listeners = new CopyOnWriteArrayList<>();
    private final ScheduledExecutorService executorService = Executors.newSingleThreadScheduledExecutor();

    private volatile boolean running = false;
    private TargetDataLine line;
    private final AudioFormat audioFormat;
    private final byte[] buffer;

    // PCM of the segment currently being recorded. ByteArrayOutputStream gives
    // amortized O(1) appends (the previous array-copy-per-chunk approach was
    // O(n^2) over a segment's lifetime).
    private final ByteArrayOutputStream segmentBuffer = new ByteArrayOutputStream();
    // Timestamp (ms) at which the current run of silence began, or -1 if none.
    private long silenceStart = -1;
    private final int silenceThresholdMs = 1000; // 1 s of silence ends the segment
    private boolean isRecording = false;

    /**
     * @param configManager     application configuration (unused in this class
     *                          as written — presumably retained for future
     *                          format configuration; confirm before removing)
     * @param monitoringService metrics sink for capture counters
     */
    public AudioCaptureService(ConfigManager configManager, MonitoringService monitoringService) {
        this.configManager = configManager;
        this.monitoringService = monitoringService;
        // 16 kHz, 16-bit, mono, signed, little-endian PCM.
        this.audioFormat = new AudioFormat(16000, 16, 1, true, false);
        this.buffer = new byte[1024]; // per-read chunk: 512 samples = 32 ms of audio
    }

    /** Registers a listener that receives complete WAV-encoded voice segments. */
    public void addListener(AudioFrameListener listener) {
        listeners.add(listener);
    }

    /** Unregisters a previously added listener. */
    public void removeListener(AudioFrameListener listener) {
        listeners.remove(listener);
    }

    /**
     * Opens the microphone and starts the periodic capture task.
     *
     * <p>If the line cannot be opened, the {@code audio.mic.error} counter is
     * incremented and {@code running} is reset to {@code false} so that a
     * later retry of {@code start()} is possible (the previous implementation
     * left the flag set, permanently blocking restarts after a failure).
     */
    public void start() {
        if (running) {
            LOGGER.warn("AudioCaptureService 已经启动，无需重复启动。");
            return;
        }
        running = true;

        try {
            DataLine.Info info = new DataLine.Info(TargetDataLine.class, audioFormat);
            line = (TargetDataLine) AudioSystem.getLine(info);
            line.open(audioFormat);
            line.start();
            LOGGER.info("启动真实麦克风采集服务，格式: {}", audioFormat);
        } catch (LineUnavailableException e) {
            LOGGER.error("麦克风不可用，请检查硬件/权限", e);
            monitoringService.counter("audio.mic.error").increment();
            running = false; // allow a retry once the hardware/permission issue is fixed
            return;
        } catch (Exception e) {
            LOGGER.error("打开麦克风异常", e);
            running = false;
            return;
        }

        Duration interval = Duration.ofMillis(500); // process every 500 ms
        executorService.scheduleAtFixedRate(this::captureTick, 0, interval.toMillis(), TimeUnit.MILLISECONDS);
        LOGGER.debug("Audio capture scheduler started, interval {} ms", interval.toMillis());
    }

    /**
     * One scheduler tick: drains every byte the mixer currently has buffered.
     *
     * <p>The previous implementation performed a single blocking 1024-byte
     * read per 500 ms tick (~32 ms of audio at 16 kHz/16-bit), silently
     * dropping the vast majority of the stream. Draining {@code available()}
     * in a loop keeps up with the capture rate.
     */
    private void captureTick() {
        try {
            if (!running || line == null) {
                return;
            }
            boolean gotData = false;
            while (running && line.available() > 0) {
                int bytesRead = line.read(buffer, 0, Math.min(line.available(), buffer.length));
                if (bytesRead <= 0) {
                    break;
                }
                gotData = true;
                processChunk(bytesRead);
            }
            if (!gotData) {
                // No data this tick: count it as silence so an in-flight
                // segment can still be closed and emitted.
                handleSilence();
                String frame = "静音"; // 兼容旧打印
                System.out.println("Emitting audio frame: " + frame);
            }
        } catch (Exception e) {
            monitoringService.counter("audio.frame.error").increment();
            LOGGER.error("音频采集异常", e);
        }
    }

    /** Classifies one PCM chunk as voiced or silent and updates segment state. */
    private void processChunk(int bytesRead) {
        double rms = computeRms(buffer, bytesRead);
        if (rms > RMS_THRESHOLD) {
            if (!isRecording) {
                isRecording = true;
                segmentBuffer.reset();
                LOGGER.debug("Voice activity started");
            }
            // Voice resumed: restart the silence timer. (The previous code kept
            // a stale timestamp across voiced chunks, so segments could be
            // truncated by a silence window that included voiced audio.)
            silenceStart = -1;
            segmentBuffer.write(buffer, 0, bytesRead);
        } else {
            handleSilence();
        }
    }

    /**
     * Tracks continuous silence while recording; once the threshold is
     * exceeded, wraps the accumulated PCM in a WAV header and dispatches it
     * to all listeners. No-op when not recording.
     */
    private void handleSilence() {
        if (!isRecording) {
            return;
        }
        long now = System.currentTimeMillis();
        if (silenceStart == -1) {
            silenceStart = now;
        } else if (now - silenceStart > silenceThresholdMs) {
            isRecording = false;
            byte[] wavData = addWavHeader(segmentBuffer.toByteArray(), audioFormat);
            LOGGER.debug("Voice segment ended, length: {} bytes", wavData.length);
            monitoringService.counter("audio.segment.emitted").increment();
            listeners.forEach(listener -> listener.onAudioData(wavData));
            segmentBuffer.reset();
            silenceStart = -1;
        }
    }

    /**
     * Root-mean-square amplitude of {@code length} bytes of 16-bit
     * little-endian PCM in {@code data}.
     *
     * <p>Returns 0 for chunks shorter than one sample (the previous version
     * divided by zero, producing NaN) and never reads the odd trailing byte
     * of an odd-length chunk.
     */
    private static double computeRms(byte[] data, int length) {
        int samples = length / 2;
        if (samples == 0) {
            return 0;
        }
        double sumOfSquares = 0;
        for (int i = 0; i + 1 < length; i += 2) {
            short sample = (short) ((data[i] & 0xFF) | (data[i + 1] << 8));
            sumOfSquares += (double) sample * sample;
        }
        return Math.sqrt(sumOfSquares / samples);
    }

    /**
     * Stops capture and releases the microphone. Shuts down the internal
     * scheduler, so the service cannot be started again afterwards.
     */
    public void stop() {
        if (!running) {
            LOGGER.warn("AudioCaptureService 未启动，无需停止。");
            return;
        }
        running = false;
        if (line != null) {
            line.stop();  // halt capture before releasing the line
            line.close();
            LOGGER.info("麦克风已关闭。");
        }
        executorService.shutdownNow();
        LOGGER.info("音频采集服务已停止。");
    }

    /**
     * Prepends a canonical 44-byte RIFF/WAVE header (PCM, audio format 1) to
     * raw PCM data. All multi-byte header fields are little-endian per the
     * RIFF specification.
     *
     * @param pcmData raw PCM samples matching {@code format}
     * @param format  the capture format (sample rate, channels, bit depth)
     * @return a complete in-memory WAV file
     */
    private byte[] addWavHeader(byte[] pcmData, AudioFormat format) {
        int sampleRate = (int) format.getSampleRate();
        int channels = format.getChannels();
        int bitsPerSample = format.getSampleSizeInBits();
        int byteRate = sampleRate * channels * bitsPerSample / 8;
        int blockAlign = channels * bitsPerSample / 8;
        int dataSize = pcmData.length;

        byte[] header = new byte[44];
        // RIFF chunk descriptor
        header[0] = 'R'; header[1] = 'I'; header[2] = 'F'; header[3] = 'F';
        writeInt(header, 4, 36 + dataSize); // chunk size = file size - 8
        header[8] = 'W'; header[9] = 'A'; header[10] = 'V'; header[11] = 'E';
        // "fmt " subchunk
        header[12] = 'f'; header[13] = 'm'; header[14] = 't'; header[15] = ' ';
        writeInt(header, 16, 16); // subchunk1 size (16 for PCM)
        writeShort(header, 20, (short) 1); // audio format 1 = uncompressed PCM
        writeShort(header, 22, (short) channels);
        writeInt(header, 24, sampleRate);
        writeInt(header, 28, byteRate);
        writeShort(header, 30, (short) blockAlign);
        writeShort(header, 32, (short) bitsPerSample);
        // "data" subchunk
        header[36] = 'd'; header[37] = 'a'; header[38] = 't'; header[39] = 'a';
        writeInt(header, 40, dataSize);

        byte[] wav = new byte[header.length + dataSize];
        System.arraycopy(header, 0, wav, 0, header.length);
        System.arraycopy(pcmData, 0, wav, header.length, dataSize);
        return wav;
    }

    /** Writes a 32-bit int into {@code array} at {@code offset}, little-endian. */
    private void writeInt(byte[] array, int offset, int value) {
        array[offset] = (byte) (value & 0xFF);
        array[offset + 1] = (byte) ((value >> 8) & 0xFF);
        array[offset + 2] = (byte) ((value >> 16) & 0xFF);
        array[offset + 3] = (byte) ((value >> 24) & 0xFF);
    }

    /** Writes a 16-bit short into {@code array} at {@code offset}, little-endian. */
    private void writeShort(byte[] array, int offset, short value) {
        array[offset] = (byte) (value & 0xFF);
        array[offset + 1] = (byte) ((value >> 8) & 0xFF);
    }
}

