package com.example.flutter_spectrum

import android.media.AudioTrack
import android.media.AudioTrack.ERROR_BAD_VALUE
import com.google.android.exoplayer2.C
import com.google.android.exoplayer2.Format
import com.google.android.exoplayer2.audio.AudioProcessor
import com.google.android.exoplayer2.util.Assertions
import com.google.android.exoplayer2.util.Util
import java.nio.ByteBuffer
import java.nio.ByteOrder
import kotlin.math.max

/**
 * An [AudioProcessor] that forwards its input unchanged to the output while also
 * extracting a mono (channel-averaged) copy of every frame, normalising each
 * 16-bit sample to `[-1.0, 1.0]` and delivering the result to the registered
 * [PcmListener] (intended as FFT/spectrum input).
 *
 * Only [C.ENCODING_PCM_16BIT] input is supported; [configure] throws
 * [AudioProcessor.UnhandledAudioFormatException] for anything else.
 */
class PcmAudioProcessor(private val listener: PcmListener) : AudioProcessor {

    /** Receives the mono, normalised PCM samples extracted from each queued input buffer. */
    interface PcmListener {
        fun onPcmReady(sampleRateHz: Int, channelCount: Int, pcm: DoubleArray)
    }

    // Set by configure() once a supported format is accepted; cleared by reset().
    private var isActive: Boolean = false

    // Pass-through copy of the interleaved input; exposed to the sink via getOutput().
    private var processBuffer: ByteBuffer = AudioProcessor.EMPTY_BUFFER

    // Channel-averaged (mono) 16-bit samples handed to processPcm()/the listener.
    private var pcmBuffer: ByteBuffer = AudioProcessor.EMPTY_BUFFER

    // Buffer currently available through getOutput(); swapped back to EMPTY once read.
    private var outputBuffer: ByteBuffer = AudioProcessor.EMPTY_BUFFER

    private var inputEnded: Boolean = false

    // NOTE(review): srcBuffer, srcBufferPosition and tempByteArray are allocated but
    // never read or written after configure() — the output-delay mechanism described
    // on getDefaultBufferSizeInBytes() appears unfinished. Kept as-is to avoid
    // behaviour changes; consider completing the delay logic or removing these.
    private lateinit var srcBuffer: ByteBuffer
    private var srcBufferPosition = 0
    private val tempByteArray = ByteArray(SAMPLE_SIZE * 2)

    private var audioTrackBufferSize = 0

    // Valid only after configure() has succeeded; invalidated by reset().
    private lateinit var inputAudioFormat: AudioProcessor.AudioFormat

    /**
     * Mirrors DefaultAudioSink.getDefaultBufferSize in ExoPlayer.
     *
     * Because an AudioTrack buffer sits between this processor and the audible
     * output, the processor receives audio data early. Sizing an internal buffer
     * identically to the AudioTrack buffer would let us delay the analysed data
     * to match what the user actually hears.
     *
     * Note: relies on [inputAudioFormat] (via [durationUsToFrames]) already being
     * assigned — configure() sets it before calling this method.
     */
    private fun getDefaultBufferSizeInBytes(audioFormat: AudioProcessor.AudioFormat): Int {
        val outputPcmFrameSize =
            Util.getPcmFrameSize(audioFormat.encoding, audioFormat.channelCount)
        val minBufferSize =
            AudioTrack.getMinBufferSize(
                audioFormat.sampleRate,
                Util.getAudioTrackChannelConfig(audioFormat.channelCount),
                audioFormat.encoding
            )
        Assertions.checkState(minBufferSize != ERROR_BAD_VALUE)
        val multipliedBufferSize = minBufferSize * EXO_BUFFER_MULTIPLICATION_FACTOR
        val minAppBufferSize =
            durationUsToFrames(EXO_MIN_BUFFER_DURATION_US).toInt() * outputPcmFrameSize
        val maxAppBufferSize = max(
            minBufferSize.toLong(),
            durationUsToFrames(EXO_MAX_BUFFER_DURATION_US) * outputPcmFrameSize
        ).toInt()
        // Round down to a whole number of frames, as the sink does.
        val bufferSizeInFrames = Util.constrainValue(
            multipliedBufferSize,
            minAppBufferSize,
            maxAppBufferSize
        ) / outputPcmFrameSize
        return bufferSizeInFrames * outputPcmFrameSize
    }

    /** Converts a duration in microseconds to a frame count at the configured sample rate. */
    private fun durationUsToFrames(durationUs: Long): Long {
        return durationUs * inputAudioFormat.sampleRate / C.MICROS_PER_SECOND
    }

    override fun isActive(): Boolean {
        return isActive
    }

    /**
     * Accepts 16-bit PCM input and activates the processor; the output format is
     * identical to the input format (pure pass-through).
     *
     * @throws AudioProcessor.UnhandledAudioFormatException if the encoding is not
     *   [C.ENCODING_PCM_16BIT].
     */
    override fun configure(inputAudioFormat: AudioProcessor.AudioFormat): AudioProcessor.AudioFormat {
        if (inputAudioFormat.encoding != C.ENCODING_PCM_16BIT) {
            throw AudioProcessor.UnhandledAudioFormatException(
                inputAudioFormat
            )
        }
        this.inputAudioFormat = inputAudioFormat
        isActive = true

        audioTrackBufferSize = getDefaultBufferSizeInBytes(inputAudioFormat)

        srcBuffer = ByteBuffer.allocate(audioTrackBufferSize + BUFFER_EXTRA_SIZE)

        return inputAudioFormat
    }

    /**
     * Copies the interleaved input into [processBuffer] (the pass-through output)
     * while simultaneously averaging all channels of each frame into [pcmBuffer],
     * which is then normalised and forwarded to the listener via [processPcm].
     */
    override fun queueInput(inputBuffer: ByteBuffer) {
        var position = inputBuffer.position()
        val limit = inputBuffer.limit()
        // 2 bytes per 16-bit sample, one sample per channel per frame.
        val frameCount = (limit - position) / (2 * inputAudioFormat.channelCount)
        val singleChannelOutputSize = frameCount * 2
        val outputSize = frameCount * inputAudioFormat.channelCount * 2

        // Grow (or rewind) the scratch buffers to fit this input chunk.
        if (processBuffer.capacity() < outputSize) {
            processBuffer = ByteBuffer.allocateDirect(outputSize).order(ByteOrder.nativeOrder())
        } else {
            processBuffer.clear()
        }

        if (pcmBuffer.capacity() < singleChannelOutputSize) {
            pcmBuffer =
                ByteBuffer.allocateDirect(singleChannelOutputSize).order(ByteOrder.nativeOrder())
        } else {
            pcmBuffer.clear()
        }

        while (position < limit) {
            var summedUp = 0
            for (channelIndex in 0 until inputAudioFormat.channelCount) {
                val current = inputBuffer.getShort(position + 2 * channelIndex)
                processBuffer.putShort(current)
                summedUp += current
            }
            // For the FFT we use the average of all channels of the frame.
            pcmBuffer.putShort((summedUp / inputAudioFormat.channelCount).toShort())
            position += inputAudioFormat.channelCount * 2
        }

        // Mark the whole input as consumed, per the AudioProcessor contract.
        inputBuffer.position(limit)

        processPcm(this.pcmBuffer)

        processBuffer.flip()
        outputBuffer = this.processBuffer
    }

    /**
     * Normalises the mono 16-bit samples in [buffer] to doubles in `[-1.0, 1.0]`
     * and hands them to [listener]. Returns silently if the buffer holds less
     * than one full sample.
     */
    private fun processPcm(buffer: ByteBuffer) {
        // Switch the buffer to read mode.
        buffer.flip()

        // Need at least one complete 16-bit sample.
        if (buffer.remaining() < 2) {
            return
        }

        // Each sample occupies 2 bytes (16 bits).
        val sampleCount = buffer.remaining() / 2

        val doubleArray = DoubleArray(sampleCount)

        // View the bytes as shorts so we can read whole samples.
        val shortBuffer = buffer.asShortBuffer()

        for (i in 0 until sampleCount) {
            // Normalise to [-1.0, 1.0] (divide by 2^15).
            doubleArray[i] = shortBuffer.get().toDouble() / 32768.0
        }

        listener.onPcmReady(
            inputAudioFormat.sampleRate,
            inputAudioFormat.channelCount,
            doubleArray
        )
    }

    override fun queueEndOfStream() {
        inputEnded = true
        processBuffer = AudioProcessor.EMPTY_BUFFER
    }

    override fun getOutput(): ByteBuffer {
        // Hand out the pending output exactly once.
        val outputBuffer = this.outputBuffer
        this.outputBuffer = AudioProcessor.EMPTY_BUFFER
        return outputBuffer
    }

    override fun isEnded(): Boolean {
        return inputEnded && processBuffer === AudioProcessor.EMPTY_BUFFER
    }

    override fun flush() {
        // A new stream is incoming.
        outputBuffer = AudioProcessor.EMPTY_BUFFER
        inputEnded = false
    }

    override fun reset() {
        flush()
        processBuffer = AudioProcessor.EMPTY_BUFFER
        // The configured format is invalidated, so the processor is no longer
        // active until configure() succeeds again (matches BaseAudioProcessor).
        isActive = false
        inputAudioFormat =
            AudioProcessor.AudioFormat(Format.NO_VALUE, Format.NO_VALUE, Format.NO_VALUE)
    }

    companion object {
        const val SAMPLE_SIZE = 2048

        // From DefaultAudioSink.java:160 'MIN_BUFFER_DURATION_US'
        private const val EXO_MIN_BUFFER_DURATION_US: Long = 250000

        // From DefaultAudioSink.java:164 'MAX_BUFFER_DURATION_US'
        private const val EXO_MAX_BUFFER_DURATION_US: Long = 750000

        // From DefaultAudioSink.java:173 'BUFFER_MULTIPLICATION_FACTOR'
        private const val EXO_BUFFER_MULTIPLICATION_FACTOR = 4

        // Extra headroom on top of the AudioTrack buffer size.
        private const val BUFFER_EXTRA_SIZE = SAMPLE_SIZE * 8
    }
}