package com.soriya.switcher.core

import android.annotation.SuppressLint
import android.content.Context
import android.hardware.vcamera.VirtualCameraManager
import android.media.AudioAttributes
import android.media.AudioFormat
import android.media.AudioTrack
import android.media.MediaCodec
import android.media.MediaExtractor
import android.media.MediaFormat
import android.media.MediaMetadataRetriever
import android.os.Handler
import android.os.HandlerThread
import android.os.MemoryFile
import android.os.ParcelFileDescriptor
import android.os.SharedMemory
import android.system.OsConstants
import android.util.Log
import android.view.Surface
import com.soriya.switcher.config.CameraConfig
import com.soriya.switcher.util.showToast
import java.io.FileDescriptor
import java.io.FileOutputStream
import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicBoolean
import kotlin.math.absoluteValue
import kotlin.math.sqrt

/**
 * Plays a local media file through Android's asynchronous [MediaCodec] API and feeds
 * the decoded output into the vendor [VirtualCameraManager]:
 *  - decoded video frames are copied into a [SharedMemory] region sized for one
 *    w * h * 3/2 frame (YUV 4:2:0 layout implied by the size — TODO confirm format),
 *  - decoded PCM audio is both played through an [AudioTrack] and forwarded to the
 *    virtual camera service.
 *
 * Playback loops: when an extractor reaches end-of-stream it seeks back to 0.
 * NOTE(review): the A/V clock is intentionally NOT reset on loop (see the
 * commented-out reset in [onInputBufferAvailable]); pacing after the first loop
 * relies on the extractor restarting presentation timestamps — confirm before changing.
 *
 * Lifecycle: [start] ... [stop]. Not safe for concurrent [start] calls; [stop] is
 * idempotent and guarded by an atomic flag.
 */
class VideoDecoder(context: Context) : MediaCodec.Callback() {

    private var videoDecoder: MediaCodec? = null
    private var audioDecoder: MediaCodec? = null
    private var videoHandlerThread: HandlerThread? = null
    private var audioHandlerThread: HandlerThread? = null

    private var videoExtractor: MediaExtractor? = null
    private var audioExtractor: MediaExtractor? = null
    private var videoFormat: MediaFormat? = null
    private var audioFormat: MediaFormat? = null

    private var audioTrack: AudioTrack? = null

    private val virtualCameraManager: VirtualCameraManager =
        context.getSystemService(Context.VIRTUAL_CAMERA_SERVICE) as VirtualCameraManager

    // Shared-memory frame buffer plus its mapped view. The mapping MUST be released
    // with SharedMemory.unmap() in stop(); closing the SharedMemory alone does not
    // release a live mapping.
    private var videoBufferSharedMemory: SharedMemory? = null
    private var videoBufferMapped: ByteBuffer? = null

    // True while playback is running; cleared by stop() and onError().
    private val isStarted = AtomicBoolean(false)

    // Presentation timestamp (us) of the most recently written audio buffer.
    private var audioPresentationTimeUs = 0L

    // Wall-clock reference started on the first decoded audio buffer; video frames
    // sleep until their presentation time relative to this clock (audio-master sync).
    private val clock = object {
        var isStarted = false
        var startTimeNs = 0L
        fun start() {
            isStarted = true
            startTimeNs = System.nanoTime()
        }
        fun reset() {
            isStarted = false
            startTimeNs = 0L
        }
        fun getElapseUs(): Long {
            if (!isStarted) return 0L
            return (System.nanoTime() - startTimeNs) / 1000
        }
    }

    private var volumeListener: ((Float) -> Unit)? = null

    /** Wall-clock time (us) at which the last video frame was handled. */
    var lastVideoFrameUs = 0L

    init {
        Log.d(TAG, "virtualCameraManager: $virtualCameraManager")
    }

    /** @return true while playback is running (set in [start], cleared in [stop]/[onError]). */
    fun started(): Boolean = isStarted.get()

    /**
     * Parses [videoPath], configures the virtual camera, the audio output and both
     * decoders, then begins asynchronous decoding on dedicated handler threads.
     *
     * @param videoPath path of a media file containing one video and one audio track
     * @param rotate    rotation constant forwarded to the virtual camera
     * @param surface   optional surface the video decoder renders into
     */
    fun start(videoPath: String, rotate: Int = CameraConfig.ROTATE_0, surface: Surface? = null) {
        // Guard against double-start: a second start() would clobber live codec
        // references that stop() could then never release.
        if (isStarted.get()) {
            Log.w(TAG, "start() called while already started, ignoring")
            return
        }

        parseVideo(videoPath)

        if (videoExtractor == null || audioExtractor == null) {
            showToast("视频读取失败")
            releaseExtractors() // parseVideo may have created one of the two
            return
        }

        val videoMimetype = videoFormat?.getString(MediaFormat.KEY_MIME)
        val width = videoFormat?.getInteger(MediaFormat.KEY_WIDTH)
        val height = videoFormat?.getInteger(MediaFormat.KEY_HEIGHT)
        val audioMimetype = audioFormat?.getString(MediaFormat.KEY_MIME)
        val sampleRate = audioFormat?.getInteger(MediaFormat.KEY_SAMPLE_RATE)
        val channelCount = audioFormat?.getInteger(MediaFormat.KEY_CHANNEL_COUNT)
        val bitRate = audioFormat?.getInteger(MediaFormat.KEY_BIT_RATE)

        Log.i(TAG, "mimetype: $videoMimetype, size: ${width}x$height")

        if (videoMimetype == null || width == null || height == null) {
            showToast("读取视频元数据失败")
            releaseExtractors() // original leaked both extractors on this path
            return
        }

        Log.i(TAG, "mimetype: $audioMimetype, sampleRate: $sampleRate, channel: $channelCount, bitRate: $bitRate")

        if (audioMimetype == null || sampleRate == null || channelCount == null) {
            showToast("读取音频元数据失败")
            releaseExtractors()
            return
        }

        clock.reset()

        virtualCameraManager.setVideoConfig(width, height, rotate)
        virtualCameraManager.setAudioConfig(sampleRate, channelCount)
        virtualCameraManager.open()

        // One 4:2:0 frame: width * height luma + width * height / 2 chroma.
        videoBufferSharedMemory = SharedMemory.create("VideoFrame", width * height * 3 / 2)
        videoBufferMapped = videoBufferSharedMemory?.mapReadWrite()
        virtualCameraManager.setVideoBuffer(videoBufferSharedMemory)

        val channelConfig =
            if (channelCount == 1) AudioFormat.CHANNEL_OUT_MONO else AudioFormat.CHANNEL_OUT_STEREO
        audioTrack = AudioTrack.Builder()
            .setAudioAttributes(
                AudioAttributes.Builder()
                    .setUsage(AudioAttributes.USAGE_MEDIA)
                    .setContentType(AudioAttributes.CONTENT_TYPE_MUSIC)
                    .build()
            )
            .setAudioFormat(
                AudioFormat.Builder()
                    .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
                    .setSampleRate(sampleRate)
                    .setChannelMask(channelConfig)
                    .build()
            )
            .setBufferSizeInBytes(
                AudioTrack.getMinBufferSize(sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
            )
            .build()
        audioTrack?.play()

        val vDecoder = MediaCodec.createDecoderByType(videoMimetype)
        // Android 12+
//        videoFormat!!.setInteger(MediaFormat.KEY_ROTATION, 90)
        vDecoder.configure(videoFormat, surface, null, 0)
        val aDecoder = MediaCodec.createDecoderByType(audioMimetype)
        aDecoder.configure(audioFormat, null, null, 0)
        videoDecoder = vDecoder
        audioDecoder = aDecoder

        isStarted.set(true)

        // Each codec gets its own looper thread so audio and video callbacks
        // never block each other (the video path sleeps for pacing).
        val vThread = HandlerThread("VideoCodecHandlerThread").also { it.start() }
        val aThread = HandlerThread("AudioCodecHandlerThread").also { it.start() }
        videoHandlerThread = vThread
        audioHandlerThread = aThread

        vDecoder.setCallback(this, Handler(vThread.looper))
        vDecoder.start()
        aDecoder.setCallback(this, Handler(aThread.looper))
        aDecoder.start()
    }

    /**
     * Stops playback and releases every resource acquired by [start].
     * Idempotent; safe to call from any thread.
     */
    fun stop() {
        // compareAndSet makes concurrent/double stop() a no-op (the original
        // get()+set() pair could let two callers both run the teardown).
        if (!isStarted.compareAndSet(true, false)) return

        virtualCameraManager.close()

        // Quit the callback loopers first so no further onInput/onOutput callbacks
        // touch the extractors or codecs while we tear them down.
        videoHandlerThread?.looper?.quit()
        audioHandlerThread?.looper?.quit()
        try {
            videoHandlerThread?.join(300)
            audioHandlerThread?.join(300)
        } catch (e: InterruptedException) {
            Thread.currentThread().interrupt()
        }

        // Stop/release the codecs BEFORE the extractors they read from
        // (the original released the extractors first). A codec already in an
        // error state throws from stop(); log and continue releasing.
        videoDecoder?.runCatching {
            stop()
            release()
        }?.onFailure { Log.w(TAG, "video codec teardown failed", it) }
        audioDecoder?.runCatching {
            stop()
            release()
        }?.onFailure { Log.w(TAG, "audio codec teardown failed", it) }
        videoDecoder = null
        audioDecoder = null

        releaseExtractors()

        audioTrack?.stop()
        audioTrack?.release()
        audioTrack = null

        // Unmap before closing: close() alone does not release a live mapping
        // (the original only called ByteBuffer.clear(), which leaks the mapping).
        videoBufferMapped?.let { SharedMemory.unmap(it) }
        videoBufferMapped = null
        videoBufferSharedMemory?.close()
        videoBufferSharedMemory = null
        Log.d(TAG, "videoBufferSharedMemory close")

        videoHandlerThread = null
        audioHandlerThread = null // the original never nulled this thread
    }

    /** Registers a listener receiving a normalized volume value [0.0, 1.0] per audio buffer. */
    fun setOnVolumeListener(volumeListener: (Float) -> Unit) {
        this.volumeListener = volumeListener
    }

    /**
     * Feeds the next sample from the matching extractor into [codec].
     * On end-of-stream the extractor is rewound so playback loops; if rewinding
     * still yields no data, EOS is queued so the codec drains cleanly.
     */
    override fun onInputBufferAvailable(codec: MediaCodec, index: Int) {
        if (!isStarted.get()) return

        val extractor = when (codec) {
            videoDecoder -> videoExtractor
            audioDecoder -> audioExtractor
            else -> null
        } ?: return

        val buffer = codec.getInputBuffer(index) ?: return
        var size = extractor.readSampleData(buffer, 0)
        if (size < 0) {
//            clock.reset()
            // Loop playback: rewind to the nearest preceding sync sample and retry.
            extractor.seekTo(0, MediaExtractor.SEEK_TO_PREVIOUS_SYNC)
            size = extractor.readSampleData(buffer, 0)
            if (size < 0) {
                // Still no data after rewind — signal end-of-stream.
                codec.queueInputBuffer(index, 0, 0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM)
                return
            }
        }
        codec.queueInputBuffer(index, 0, size, extractor.sampleTime, 0)
        extractor.advance()
    }

    /**
     * Audio path: starts the sync clock on the first buffer, reports volume,
     * plays the PCM and forwards it to the virtual camera.
     * Video path: sleeps until the frame's presentation time (audio-master clock),
     * copies the frame into shared memory and notifies the virtual camera.
     */
    @SuppressLint("DiscouragedPrivateApi")
    override fun onOutputBufferAvailable(
        codec: MediaCodec,
        index: Int,
        info: MediaCodec.BufferInfo
    ) {
        if (!isStarted.get()) return

        val presentationTimeUs = info.presentationTimeUs
        val buffer = codec.getOutputBuffer(index) ?: return

        if (codec == audioDecoder) {
            val byteData = ByteArray(info.size)
            buffer.get(byteData)
            // Audio drives the clock: it starts on the very first decoded buffer.
            if (!clock.isStarted) {
                clock.start()
            }
            audioPresentationTimeUs = presentationTimeUs

            volumeListener?.let {
                val volume = byteArrayToVolume(
                    byteData,
                    channelCount = audioFormat?.getInteger(MediaFormat.KEY_CHANNEL_COUNT) ?: 1
                )
                it(volume)
            }
            val len = audioTrack?.write(byteData, 0, byteData.size)
            Log.d(TAG, "len: $len")
            virtualCameraManager.writeAudio(byteData)
            codec.releaseOutputBuffer(index, false)
        } else if (codec == videoDecoder) {
            val nowUs = System.nanoTime() / 1000
            lastVideoFrameUs = nowUs
            val sleepUs = presentationTimeUs - clock.getElapseUs()
            // Only sleep when the frame is more than 10ms (10_000us) early.
            Log.d(
                TAG,
                "size: ${info.size}, sleepUs: $sleepUs, presentationTimeUs: $presentationTimeUs, getElapseUs: ${clock.getElapseUs()}"
            )
            if (sleepUs > 10_000) {
                // Split into ms + remaining ns (remainder is < 1_000_000 ns, valid for sleep()).
                Thread.sleep(sleepUs / 1000, (sleepUs % 1000).toInt() * 1000)
            }

            videoBufferMapped?.position(0)
            videoBufferMapped?.put(buffer)
            val time1 = System.nanoTime() / 1000
            virtualCameraManager.onVideoFrameAvailable(info.size, 0)
            Log.d(TAG, "onVideoFrameAvailable: ${(System.nanoTime() / 1000) - time1}")

            codec.releaseOutputBuffer(index, true)
        }
    }

    override fun onOutputFormatChanged(
        codec: MediaCodec,
        format: MediaFormat
    ) {}

    /**
     * Marks playback as stopped so callbacks become no-ops.
     * NOTE(review): resources are NOT released here — the owner is expected to
     * call [stop] afterwards; confirm callers do so.
     */
    override fun onError(
        codec: MediaCodec,
        e: MediaCodec.CodecException
    ) {
        Log.e(TAG, "onError: ", e)
        isStarted.set(false)
    }

    /** Releases and clears both track extractors (safe when already null). */
    private fun releaseExtractors() {
        videoExtractor?.release()
        videoExtractor = null
        audioExtractor?.release()
        audioExtractor = null
    }

    /**
     * Probes [videoPath] for the first video and the first audio track, then creates
     * one dedicated [MediaExtractor] per track. On failure both extractor fields end
     * up null, which [start] detects and surfaces as a toast.
     */
    private fun parseVideo(videoPath: String) {
        try {
            var videoTrackIndex = -1
            var audioTrackIndex = -1

            val probe = MediaExtractor()
            try {
                probe.setDataSource(videoPath)
                for (index in 0 until probe.trackCount) {
                    val trackFormat = probe.getTrackFormat(index)
                    val mime = trackFormat.getString(MediaFormat.KEY_MIME) ?: continue
                    // Keep the FIRST track of each kind (the original silently kept the last).
                    if (videoTrackIndex < 0 && mime.startsWith("video/")) {
                        videoFormat = trackFormat
                        videoTrackIndex = index
                    }
                    if (audioTrackIndex < 0 && mime.startsWith("audio/")) {
                        audioFormat = trackFormat
                        audioTrackIndex = index
                    }
                }
            } finally {
                probe.release() // the probe extractor was leaked in the original
            }

            // Bail out explicitly instead of letting selectTrack(-1) throw.
            if (videoTrackIndex < 0 || audioTrackIndex < 0) {
                Log.e(TAG, "missing track: video=$videoTrackIndex, audio=$audioTrackIndex")
                return
            }

            videoExtractor = MediaExtractor().apply {
                setDataSource(videoPath)
                selectTrack(videoTrackIndex)
                seekTo(0, MediaExtractor.SEEK_TO_PREVIOUS_SYNC)
            }
            audioExtractor = MediaExtractor().apply {
                setDataSource(videoPath)
                selectTrack(audioTrackIndex)
                seekTo(0, MediaExtractor.SEEK_TO_PREVIOUS_SYNC)
            }
        } catch (e: Exception) {
            // The original swallowed this silently ("null" expression); log it and
            // make sure a partially-created extractor does not leak.
            Log.e(TAG, "parseVideo failed for $videoPath", e)
            releaseExtractors()
        }
    }

    /**
     * Converts a 16-bit little-endian PCM byte array into a normalized volume value.
     * Only the first channel of each frame is sampled (inter-channel bytes are skipped).
     *
     * @param audioBytes   raw audio bytes (must be 16-bit PCM)
     * @param useRms       true to compute RMS (recommended), false for peak amplitude
     * @param channelCount number of interleaved channels (1 = mono, 2 = stereo)
     * @return volume in [0.0, 1.0]
     */
    private fun byteArrayToVolume(
        audioBytes: ByteArray,
        useRms: Boolean = true,
        channelCount: Int = 1
    ): Float {
        if (audioBytes.isEmpty()) return 0f

        // 16-bit PCM: 2 bytes per sample per channel.
        val sampleCount = audioBytes.size / (2 * channelCount)
        if (sampleCount <= 0) return 0f

        var maxAmplitude = 0f
        var sumSquares = 0f

        for (i in 0 until sampleCount) {
            // Read one 16-bit sample (little-endian), skipping the other channels.
            val byteIndex = i * 2 * channelCount
            val low = audioBytes[byteIndex].toInt() and 0xFF
            val high = audioBytes[byteIndex + 1].toInt() and 0xFF
            val sample = (high shl 8 or low).toShort().toInt()

            // Normalize to [-1.0, 1.0]; 32768 (not 32767) keeps the result in range.
            val amplitude = sample / 32768f
            val absAmplitude = amplitude.absoluteValue

            if (!useRms) {
                // Peak mode: track the maximum amplitude.
                if (absAmplitude > maxAmplitude) {
                    maxAmplitude = absAmplitude
                }
            } else {
                // RMS mode: accumulate squared amplitudes.
                sumSquares += amplitude * amplitude
            }
        }

        return if (useRms) {
            // RMS = sqrt(mean(square))
            val rms = sqrt(sumSquares / sampleCount)
            rms.coerceIn(0f, 1f)
        } else {
            maxAmplitude.coerceIn(0f, 1f)
        }
    }

    companion object {
        private const val TAG = "VideoDecoder"
    }
}