package com.alexvas.rtsp.codec

import android.media.*
import android.os.Process
import android.util.Log
import java.nio.ByteBuffer
import java.util.concurrent.LinkedBlockingQueue


class AudioDecodeThread (
        private val mimeType: String,
        private val sampleRate: Int,
        private val channelCount: Int,
        private val codecConfig: ByteArray?,
        private val audioFrameQueue: AudioFrameQueue) : Thread() {

    // Decode-loop flag. Written by stopAsync() (caller thread) and read by both
    // this decoder thread and the playback thread, so it must be @Volatile for
    // the write to be visible across threads.
    @Volatile
    private var isRunning = true
    // Decoded PCM chunks handed from the decoder loop to the playback thread.
    private val audioBuffer = LinkedBlockingQueue<ByteArray>()
    // Thread draining audioBuffer into the AudioTrack; created in startAudioPlay().
    private var audioPlayThread: Thread? = null

    /**
     * Requests asynchronous shutdown of the decode loop and the playback
     * thread. Safe to call from any thread; returns immediately.
     */
    fun stopAsync() {
        if (DEBUG) Log.v(TAG, "stopAsync()")
        isRunning = false
        // Unblock the playback thread if it is parked in audioBuffer.take().
        val player = audioPlayThread
        if (player != null) {
            try {
                player.interrupt()
            } catch (e: Exception) {
                e.printStackTrace()
            }
        }
        audioPlayThread = null
        // Wake up sleep() code
        interrupt()
    }

    /**
     * Spawns the playback thread that drains [audioBuffer] into [audioTrack].
     *
     * When more chunks are already queued behind the current one (playback is
     * lagging the live stream), the chunk is time-stretched slightly faster
     * than real time — pitch preserved — so playback catches up.
     *
     * @param audioTrack an already started AudioTrack matching the decoder
     *                   output (16-bit PCM at [sampleRate]).
     */
    fun startAudioPlay(audioTrack: AudioTrack) {
        // Speed-up factor applied while draining a backlog (>1 = faster).
        val catchUpSpeed = 1.2f
        audioPlayThread = Thread {
            while (isRunning) {
                try {
                    val bytes = audioBuffer.take()
                    if (audioBuffer.isNotEmpty() && bytes.size > 10) {
                        // Backlog present: time-stretch this chunk to catch up.
                        // Bug fix: use the stream's actual sample rate — this
                        // was hard-coded to 16000, which made the 50 ms WSOLA
                        // window the wrong length for any other stream rate.
                        val resampledData = processAudioKeepPitchKotlin(bytes, sampleRate, catchUpSpeed)
                        audioTrack.write(resampledData, 0, resampledData.size)
                    } else {
                        audioTrack.write(bytes, 0, bytes.size)
                    }
                } catch (e: Exception) {
                    // take() is interrupted by stopAsync(); the isRunning check
                    // at the top of the loop then terminates the thread.
                    e.printStackTrace()
                }
            }
        }
        audioPlayThread?.start()
    }

    /**
     * Speeds up 16-bit mono PCM by an integer [factor] using simple sample
     * decimation (keeps every factor-th sample, drops the rest).
     *
     * Note decimation raises the pitch along with the speed; use
     * [processAudioKeepPitchKotlin] for pitch-preserving stretching.
     *
     * @param bytes 16-bit little-endian mono PCM (two bytes per sample)
     * @param factor integer speed-up factor; default 2 preserves the original
     *               double-speed behavior
     * @return decimated PCM, (sampleCount / factor) samples long
     */
    fun processAudioForDoubleSpeed(bytes: ByteArray, factor: Int = 2): ByteArray {
        require(factor >= 1) { "factor must be >= 1" }
        // Mono 16-bit PCM: each sample is two bytes.
        val sampleCount = bytes.size / 2
        val newSampleCount = sampleCount / factor
        val result = ByteArray(newSampleCount * 2)

        for (i in 0 until newSampleCount) {
            val srcIndex = i * 2 * factor  // source byte index (skips factor-1 samples)
            val dstIndex = i * 2           // destination byte index

            result[dstIndex] = bytes[srcIndex]
            result[dstIndex + 1] = bytes[srcIndex + 1]
        }

        return result
    }
    /**
     * Time-stretches 16-bit mono PCM by [speed] while keeping pitch
     * (simplified WSOLA, see [timeStretch]).
     *
     * @param bytes input PCM data (16-bit little endian)
     * @param sampleRate sample rate in Hz, e.g. 16000
     * @param speed stretch factor, e.g. 1.1f = 1.1x faster, 0.9f = slower;
     *              must be positive
     * @return processed PCM; the input array itself when speed == 1
     */
    fun processAudioKeepPitchKotlin(bytes: ByteArray, sampleRate: Int, speed: Float): ByteArray {
        require(speed > 0f) { "Speed must be positive" }
        if (speed == 1f) return bytes

        // Unpack little-endian byte pairs into signed 16-bit samples.
        val samples = ShortArray(bytes.size / 2) { i ->
            val lo = bytes[2 * i].toInt() and 0xFF
            val hi = bytes[2 * i + 1].toInt() shl 8
            (lo or hi).toShort()
        }

        val stretched = timeStretch(samples, sampleRate, speed)

        // Pack the stretched samples back into little-endian bytes.
        val out = ByteArray(stretched.size * 2)
        for (i in stretched.indices) {
            val v = stretched[i].toInt()
            out[2 * i] = (v and 0xFF).toByte()
            out[2 * i + 1] = ((v shr 8) and 0xFF).toByte()
        }
        return out
    }
    /**
     * Simplified WSOLA (waveform-similarity overlap-add) time stretcher for
     * 16-bit mono PCM.
     *
     * Slides a 50 ms analysis window over the input, advancing windowSize/speed
     * samples per step; each window is aligned against the previous one at the
     * most similar offset and cross-faded, which changes duration without
     * changing pitch.
     *
     * NOTE(review): trailing input samples that do not fill a whole window are
     * dropped, and the output length is only approximately input.size / speed.
     *
     * @param input 16-bit mono PCM samples
     * @param sampleRate sample rate in Hz (fixes the window size in samples)
     * @param speed stretch factor (>1 speeds up, <1 slows down)
     */
    private fun timeStretch(input: ShortArray, sampleRate: Int, speed: Float): ShortArray {
        val windowMs = 50  // analysis window length: 50 ms
        val windowSize = sampleRate * windowMs / 1000
        val overlap = windowSize / 2  // 50% cross-fade region

        // Analysis hop: consume windowSize/speed input samples per window.
        val step = (windowSize / speed).toInt()
        val output = ArrayList<Short>()

        var pos = 0
        var prev: ShortArray? = null

        while (pos + windowSize < input.size) {
            val chunk = input.copyOfRange(pos, pos + windowSize)
            if (prev != null) {
                // Align the new window with the tail of the previous one at
                // the best-correlating offset, then cross-fade the overlap.
                val bestOffset = findBestOverlap(prev, chunk, overlap)
                val blended = overlapAdd(prev, chunk, overlap, bestOffset)
                output.addAll(blended.toList())
            } else {
                // First window is emitted as-is.
                output.addAll(chunk.toList())
            }
            prev = chunk
            pos += step
        }

        return output.toShortArray()
    }
    /**
     * Finds the offset into [b] (in 0 until [overlap]) at which the first
     * [overlap] samples of [b] correlate best with the last [overlap] samples
     * of [a].
     *
     * @return the offset with maximum cross-correlation
     */
    private fun findBestOverlap(a: ShortArray, b: ShortArray, overlap: Int): Int {
        var bestOffset = 0
        // Bug fix: this was initialized to Float.MIN_VALUE, which is the
        // smallest POSITIVE float — when every candidate correlation was
        // negative, no offset could ever beat it and 0 was always returned.
        // Negative infinity makes the first candidate always win initially.
        var bestCorr = Float.NEGATIVE_INFINITY

        for (offset in 0 until overlap) {
            var corr = 0f
            for (i in 0 until overlap) {
                val ai = a[a.size - overlap + i]
                val bi = b[i + offset]
                corr += ai * bi
            }
            if (corr > bestCorr) {
                bestCorr = corr
                bestOffset = offset
            }
        }
        return bestOffset
    }

    /**
     * Cross-fades two windows: emits the non-overlapping head of [a], a
     * linear fade from the tail of [a] into [b] (starting at [offset]), then
     * the remainder of [b].
     */
    private fun overlapAdd(a: ShortArray, b: ShortArray, overlap: Int, offset: Int): ShortArray {
        val headLen = a.size - overlap
        val result = ShortArray(headLen + (b.size - offset))

        // Head of a (before the overlap region), copied verbatim.
        a.copyInto(result, destinationOffset = 0, startIndex = 0, endIndex = headLen)

        // Linearly weighted blend across the overlap region.
        for (i in 0 until overlap) {
            val wa = 1f - i / overlap.toFloat()
            val wb = i / overlap.toFloat()
            val ai = a[headLen + i]
            val bi = b[i + offset]
            val mixed = (ai * wa + bi * wb).toInt()
                .coerceIn(Short.MIN_VALUE.toInt(), Short.MAX_VALUE.toInt())
            result[headLen + i] = mixed.toShort()
        }

        // Remainder of b after the blended region.
        var out = headLen + overlap
        for (i in overlap + offset until b.size) {
            result[out++] = b[i]
        }

        // The three sections fill the array exactly:
        // headLen + overlap + (b.size - overlap - offset) == result.size.
        return result
    }


    /**
     * Decoder thread main loop: configures a MediaCodec for [mimeType], feeds
     * it frames popped from [audioFrameQueue], and hands decoded PCM chunks to
     * the playback thread via [audioBuffer]. Runs until [stopAsync] is called
     * or the codec signals end of stream.
     */
    override fun run() {
        if (DEBUG) Log.d(TAG, "$name started")

        Process.setThreadPriority(Process.THREAD_PRIORITY_AUDIO)

        // Create and configure the audio decoder.
        val decoder = MediaCodec.createDecoderByType(mimeType)
        val format = MediaFormat.createAudioFormat(mimeType, sampleRate, channelCount)

        if (mimeType == MediaFormat.MIMETYPE_AUDIO_AAC) {
            // csd-0 is the 2-byte AAC AudioSpecificConfig; synthesize an
            // AAC-LC one when the stream carried no out-of-band codec config.
            val csd0 = codecConfig ?: getAacDecoderConfigData(MediaCodecInfo.CodecProfileLevel.AACObjectLC, sampleRate, channelCount)
            format.setByteBuffer("csd-0", ByteBuffer.wrap(csd0))
            format.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC)
        } else if (mimeType == MediaFormat.MIMETYPE_AUDIO_OPUS) {
            // Opus decoders need three csd buffers: csd-0 is the "OpusHead"
            // identification header, csd-1 the pre-skip and csd-2 the seek
            // pre-roll (both 64-bit nanosecond values, zeroed here).
            // NOTE(review): channel count (2) and sample rate (48000) are
            // hard-coded in this header and ignore the constructor parameters
            // — confirm against the Opus streams actually played.
            val csd0 = byteArrayOf(
                0x4f, 0x70, 0x75, 0x73, // "Opus"
                0x48, 0x65, 0x61, 0x64, // "Head"
                0x01,  // Version
                0x02,  // Channel Count
                0x00, 0x00,  // Pre skip
                0x80.toByte(), 0xbb.toByte(), 0x00, 0x00, // Sample rate 48000
                0x00, 0x00,  // Output Gain (Q7.8 in dB)
                0x00,  // Mapping Family
            )
            val csd1 = byteArrayOf(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
            val csd2 = byteArrayOf(0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
            format.setByteBuffer("csd-0", ByteBuffer.wrap(csd0))
            format.setByteBuffer("csd-1", ByteBuffer.wrap(csd1))
            format.setByteBuffer("csd-2", ByteBuffer.wrap(csd2))
        }

        decoder.configure(format, null, null, 0)
        decoder.start()

        // Creating audio playback device matching the decoder output.
        val outChannel = if (channelCount > 1) AudioFormat.CHANNEL_OUT_STEREO else AudioFormat.CHANNEL_OUT_MONO
        val outAudio = AudioFormat.ENCODING_PCM_16BIT
        val bufferSize = AudioTrack.getMinBufferSize(sampleRate, outChannel, outAudio)
        Log.i(TAG, "sampleRate: $sampleRate, bufferSize: $bufferSize , outChannel:$outChannel")
        val audioTrack = AudioTrack(
                AudioAttributes.Builder()
                        .setUsage(AudioAttributes.USAGE_MEDIA)
                        .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
                        .build(),
                AudioFormat.Builder()
                        .setEncoding(outAudio)
                        .setChannelMask(outChannel)
                        .setSampleRate(sampleRate)
                        .build(),
                bufferSize,
                AudioTrack.MODE_STREAM,
                0)
        audioTrack.play()

        // Playback runs on its own thread, fed through audioBuffer.
        startAudioPlay(audioTrack)

        val bufferInfo = MediaCodec.BufferInfo()
        while (isRunning) {
            // Feed one encoded frame to the decoder if an input buffer is free.
            val inIndex: Int = decoder.dequeueInputBuffer(10000L)
            if (inIndex >= 0) {
                var byteBuffer: ByteBuffer?
                try {
                    byteBuffer = decoder.getInputBuffer(inIndex)
                } catch (e: Exception) {
                    e.printStackTrace()
                    break
                }
                byteBuffer?.rewind()

                val audioFrame: FrameQueue.Frame?
                try {
                    audioFrame = audioFrameQueue.pop()
                    if (audioFrame == null) {
                        // No frame available; return the input buffer empty.
                        decoder.queueInputBuffer(inIndex, 0, 0, 0L, 0)
                    } else {
                        byteBuffer?.put(audioFrame.data, audioFrame.offset, audioFrame.length)
                        // Bug fix: put() wrote the frame starting at position 0
                        // of the codec buffer, so the queued offset must be 0.
                        // Previously audioFrame.offset (an offset into the
                        // frame's OWN backing array) was passed, pointing the
                        // codec at the wrong bytes whenever it was non-zero.
                        // NOTE(review): queueInputBuffer expects microseconds
                        // but timestampMs looks like milliseconds; the value is
                        // unused downstream here, but worth confirming.
                        decoder.queueInputBuffer(inIndex, 0, audioFrame.length, audioFrame.timestampMs, 0)
                    }
                } catch (e: Exception) {
                    e.printStackTrace()
                }
            }

            // Drain every decoded PCM chunk currently available.
            try {
                if (!isRunning) break
                var outIndex = decoder.dequeueOutputBuffer(bufferInfo, 10000L)
                while (outIndex >= 0) {
                    val byteBuffer: ByteBuffer? = decoder.getOutputBuffer(outIndex)

                    val chunk = ByteArray(bufferInfo.size)
                    byteBuffer?.get(chunk)
                    byteBuffer?.clear()

                    if (chunk.isNotEmpty()) {
                        // Hand the PCM chunk to the playback thread.
                        try {
                            audioBuffer.put(chunk)
                        } catch (e: Exception) {
                            e.printStackTrace()
                        }
                    }
                    decoder.releaseOutputBuffer(outIndex, false)
                    // Poll (zero timeout) for any further ready buffers.
                    outIndex = decoder.dequeueOutputBuffer(bufferInfo, 0)
                }
            } catch (e: Exception) {
                e.printStackTrace()
            }

            // All decoded frames have been rendered, we can stop playing now.
            if (bufferInfo.flags and MediaCodec.BUFFER_FLAG_END_OF_STREAM != 0) {
                Log.d(TAG, "OutputBuffer BUFFER_FLAG_END_OF_STREAM")
                break
            }
        }

        // Bug fix: stop the playback thread BEFORE tearing down the
        // AudioTrack. After an EOS break isRunning was still true, so the
        // playback thread kept spinning on write() against a released track.
        isRunning = false
        audioPlayThread?.interrupt()

        audioTrack.flush()
        audioTrack.release()

        try {
            decoder.stop()
            decoder.release()
        } catch (_: InterruptedException) {
        } catch (e: Exception) {
            e.printStackTrace()
        }
        audioFrameQueue.clear()
        if (DEBUG) Log.d(TAG, "$name stopped")
    }

    companion object {
        private val TAG: String = AudioDecodeThread::class.java.simpleName
        private const val DEBUG = false

        // MPEG-4 sampling-frequency-index table (ISO/IEC 14496-3).
        private val AAC_SAMPLE_RATE_INDEX = mapOf(
            96000 to 0x0, 88200 to 0x1, 64000 to 0x2, 48000 to 0x3,
            44100 to 0x4, 32000 to 0x5, 24000 to 0x6, 22050 to 0x7,
            16000 to 0x8, 12000 to 0x9, 11025 to 0xA, 8000 to 0xB,
            7350 to 0xC,
        )

        /**
         * Builds a 2-byte AAC AudioSpecificConfig (csd-0) laid out as:
         * 5 bits audio object type | 4 bits sampling-frequency index |
         * 4 bits channel configuration | 3 bits padding.
         *
         * Unrecognized sample rates fall back to index 0 (96 kHz), matching
         * the original behavior of leaving those bits unset.
         */
        fun getAacDecoderConfigData(audioProfile: Int, sampleRate: Int, channels: Int): ByteArray {
            val freqIndex = AAC_SAMPLE_RATE_INDEX[sampleRate] ?: 0x0
            val config = (audioProfile shl 11) or (freqIndex shl 7) or (channels shl 3)
            return byteArrayOf(
                ((config shr 8) and 0xFF).toByte(), // high byte
                (config and 0xFF).toByte(),         // low byte
            )
        }
    }

}

