package com.example.opengl.decoder

import android.media.*
import android.media.MediaCodec.BUFFER_FLAG_END_OF_STREAM
import android.media.MediaCodec.BufferInfo
import android.os.Build
import android.util.Log
import android.view.Surface
import androidx.annotation.RequiresApi
import java.nio.ByteBuffer
import java.util.concurrent.Executors

/**
 * Quick reference for the MediaCodec buffer API used below:
 *
 * // Returns the index of an available input buffer (or a negative value)
 * public int dequeueInputBuffer(long timeoutUs)
 * // Returns the input buffer at the given index
 * public ByteBuffer getInputBuffer(int index)
 * // Submits a filled input buffer to the codec's queue
 * public final void queueInputBuffer(int index, int offset, int size, long presentationTimeUs, int flags)
 * // Returns the index of an output buffer holding successfully decoded data
 * public final int dequeueOutputBuffer(BufferInfo info, long timeoutUs)
 * // Returns the output buffer at the given index
 * public ByteBuffer getOutputBuffer(int index)
 * // Returns the output buffer to the codec, optionally rendering it to the surface
 * public final void releaseOutputBuffer(int index, boolean render)
 */

/**
 * Demuxes a local media file and decodes it with two [MediaCodec] instances:
 * video frames are rendered straight to a [Surface], audio is decoded to PCM
 * and written to an [AudioTrack]. Each codec runs on its own single-thread
 * executor. Playback loops: on end-of-stream both extractors seek back to 0.
 *
 * Usage: [initMediaCodec] once, then [start]/[stop] to resume/pause, and
 * [release] to tear everything down.
 */
object MediaCodecHelper {

    private const val TAG = "MediaCodecHelper"

    // One extractor per track so video and audio keep independent read positions.
    private lateinit var mVideoExtractor: MediaExtractor
    private lateinit var mAudioExtractor: MediaExtractor

    // Track indices resolved in initMediaCodec(); -1 until a matching track is found.
    private var mVideoTrackIndex = -1
    private var mAudioTrackIndex = -1

    // MIME types ("video/...", "audio/...") of the selected tracks.
    private var mVideoKeyType = ""
    private var mAudioKeyType = ""

    private lateinit var mVideoFormat: MediaFormat
    private lateinit var mVideoDecoder: MediaCodec
    private lateinit var mAudioFormat: MediaFormat
    private lateinit var mAudioDecoder: MediaCodec

    // Dedicated threads so video and audio decode concurrently.
    private val mVideoExecutor = Executors.newSingleThreadExecutor()
    private val mAudioExecutor = Executors.newSingleThreadExecutor()

    // FIX: both flags are read/written across threads — mark them @Volatile
    // so the decode loops observe updates made by start()/stop()/release().
    // Set to true to terminate both decode loops permanently.
    @Volatile
    private var isFinishDecode = false

    // FIX: the original `isPlay` had inverted semantics (start() set it false).
    // Renamed to isPaused with the same observable behavior: decoding begins
    // immediately after init and pauses while this is true.
    @Volatile
    private var isPaused = false

    private lateinit var audioTrack: AudioTrack

    // Staging buffer the audio loop demuxes into before copying to the codec.
    private var sampleBuffer: ByteBuffer? = null

    /**
     * Opens [videoPath], selects the first video and first audio track,
     * configures and starts a decoder for each, prepares the [AudioTrack],
     * and launches both decode threads.
     *
     * @param videoPath path of a local media file containing a video and an audio track
     * @param surface   render target for decoded video frames
     * @throws IllegalStateException if the file has no video or no audio track
     */
    fun initMediaCodec(videoPath: String, surface: Surface) {
        mVideoExtractor = MediaExtractor().apply { setDataSource(videoPath) }
        mAudioExtractor = MediaExtractor().apply { setDataSource(videoPath) }

        // Locate the first video and the first audio track (first match wins).
        for (index in 0 until mVideoExtractor.trackCount) {
            val format = mVideoExtractor.getTrackFormat(index)
            val mime = format.getString(MediaFormat.KEY_MIME) ?: continue
            when {
                mVideoTrackIndex < 0 && mime.startsWith("video/") -> {
                    mVideoTrackIndex = index
                    mVideoKeyType = mime
                    mVideoFormat = format
                }
                mAudioTrackIndex < 0 && mime.startsWith("audio/") -> {
                    mAudioTrackIndex = index
                    mAudioKeyType = mime
                    mAudioFormat = format
                }
            }
        }
        // FIX: fail fast with a clear message instead of crashing later in
        // createDecoderByType("") when a track is missing.
        check(mVideoTrackIndex >= 0) { "no video track found in $videoPath" }
        check(mAudioTrackIndex >= 0) { "no audio track found in $videoPath" }

        // Video decoder renders directly to the supplied surface.
        mVideoExtractor.selectTrack(mVideoTrackIndex)
        mVideoDecoder = MediaCodec.createDecoderByType(mVideoKeyType).apply {
            configure(mVideoFormat, surface, null, 0)
            start()
        }

        // Audio decoder produces raw PCM buffers (no surface).
        mAudioExtractor.selectTrack(mAudioTrackIndex)
        mAudioDecoder = MediaCodec.createDecoderByType(mAudioKeyType).apply {
            configure(mAudioFormat, null, null, 0)
            start()
        }

        val sampleRate: Int = mAudioFormat.getInteger(MediaFormat.KEY_SAMPLE_RATE)
        val channelCount: Int = mAudioFormat.getInteger(MediaFormat.KEY_CHANNEL_COUNT)
        // FIX: the original queried the min buffer size with CHANNEL_OUT_STEREO
        // but created the AudioTrack with CHANNEL_OUT_MONO. Derive one channel
        // mask from the track's actual channel count and use it for both.
        val channelConfig =
            if (channelCount >= 2) AudioFormat.CHANNEL_OUT_STEREO else AudioFormat.CHANNEL_OUT_MONO

        val bufferSize = AudioTrack.getMinBufferSize(
            sampleRate, channelConfig,
            AudioFormat.ENCODING_PCM_16BIT
        )

        val maxInputSize = mAudioFormat.getInteger(MediaFormat.KEY_MAX_INPUT_SIZE)

        // 4x the minimum to reduce underruns; fall back to the codec's max input size.
        var audioInputBufferSize = if (bufferSize > 0) bufferSize * 4 else maxInputSize

        // Round down to a whole PCM frame (16-bit => 2 bytes per sample per channel).
        val frameSizeInBytes = channelCount * 2
        audioInputBufferSize = (audioInputBufferSize / frameSizeInBytes) * frameSizeInBytes

        Log.i(TAG, "initMediaCodec: sampleRate:$sampleRate ,channelCount:$channelCount ,bufferSize:$bufferSize")
        sampleBuffer = ByteBuffer.allocateDirect(maxInputSize)
        audioTrack = AudioTrack(
            AudioManager.STREAM_MUSIC,
            sampleRate,
            channelConfig,
            AudioFormat.ENCODING_PCM_16BIT,
            audioInputBufferSize,
            AudioTrack.MODE_STREAM
        )
        audioTrack.play()

        // Kick off both decode loops.
        mVideoExecutor.execute(videoRunnable)
        mAudioExecutor.execute(audioRunnable)
    }

    /** Resumes decoding/playback (decoding also starts automatically after init). */
    fun start() {
        isPaused = false
    }

    /** Pauses decoding/playback; the decode threads keep running but stop feeding the codecs. */
    fun stop() {
        isPaused = true
    }

    // Video decode loop: feeds demuxed samples to the decoder and releases
    // decoded frames to the surface; rewinds the extractor on end-of-stream
    // so playback loops. NOTE(review): frames are released as fast as they
    // decode — presentation-time pacing is still TODO.
    private val videoRunnable = object : Runnable {

        @RequiresApi(Build.VERSION_CODES.LOLLIPOP)
        override fun run() {
            try {
                while (!isFinishDecode) {
                    if (isPaused) {
                        Thread.sleep(10) // FIX: don't busy-spin while paused
                        continue
                    }
                    val inputIndex = mVideoDecoder.dequeueInputBuffer(-1)
                    // FIX: 0 is a valid buffer index; the original `> 0` dropped it.
                    if (inputIndex >= 0) {
                        val inputBuffer = mVideoDecoder.getInputBuffer(inputIndex)
                        inputBuffer?.let { buffer ->
                            val sampleSize = mVideoExtractor.readSampleData(buffer, 0)
                            if (sampleSize < 0) {
                                // End of stream: rewind to loop playback, and
                                // FIX: return the dequeued buffer to the codec
                                // (the original leaked it on every loop restart).
                                mVideoExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC)
                                mVideoDecoder.queueInputBuffer(inputIndex, 0, 0, 0, 0)
                            } else {
                                val presentationTimeUs = mVideoExtractor.sampleTime
                                mVideoDecoder.queueInputBuffer(
                                    inputIndex, 0, sampleSize, presentationTimeUs, 0
                                )
                                // Advance to the next sample; returns false at end of stream.
                                mVideoExtractor.advance()
                            }
                        }
                    }
                    val bufferInfo = BufferInfo()
                    val outIndex = mVideoDecoder.dequeueOutputBuffer(bufferInfo, 0)
                    if (outIndex >= 0) {
                        // render=true pushes the decoded frame to the configured surface.
                        mVideoDecoder.releaseOutputBuffer(outIndex, true)
                    }
                    // FIX: flags is a bit mask — test the bit, don't compare with ==.
                    if (bufferInfo.flags and BUFFER_FLAG_END_OF_STREAM != 0) {
                        mVideoExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC)
                    }
                }
            } catch (e: Exception) {
                // FIX: the original swallowed failures silently; at least log them.
                Log.e(TAG, "video decode loop aborted", e)
            }
        }
    }

    // Audio decode loop: demuxes into the staging buffer, copies it to the
    // codec input, and writes decoded PCM chunks to the AudioTrack.
    private val audioRunnable = Runnable {
        try {
            while (!isFinishDecode) {
                if (isPaused) {
                    Thread.sleep(10) // FIX: don't busy-spin while paused
                    continue
                }
                val inputIndex = mAudioDecoder.dequeueInputBuffer(0)
                if (inputIndex >= 0) {
                    val staging = sampleBuffer!! // allocated in initMediaCodec()
                    val sampleSize = mAudioExtractor.readSampleData(staging, 0)
                    if (sampleSize < 0) {
                        // End of stream: rewind to loop playback, and
                        // FIX: return the dequeued buffer to the codec.
                        mAudioExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC)
                        mAudioDecoder.queueInputBuffer(inputIndex, 0, 0, 0, 0)
                    } else {
                        mAudioDecoder.getInputBuffer(inputIndex)?.put(staging)
                        mAudioDecoder.queueInputBuffer(
                            inputIndex, 0, sampleSize, mAudioExtractor.sampleTime, 0
                        )
                        mAudioExtractor.advance()
                    }
                }
                val bufferInfo = BufferInfo()
                val outIndex = mAudioDecoder.dequeueOutputBuffer(bufferInfo, 0)
                if (outIndex >= 0) {
                    val buffer: ByteBuffer? = mAudioDecoder.getOutputBuffer(outIndex)
                    val chunk = ByteArray(bufferInfo.size)
                    buffer?.get(chunk)
                    buffer?.clear()
                    audioTrack.write(chunk, 0, chunk.size)
                    // FIX: the output buffer MUST be returned to the codec; the
                    // original commented this out, which stalls decoding once
                    // all output buffers are owned by the client.
                    mAudioDecoder.releaseOutputBuffer(outIndex, false)

                    // FIX: bitwise flag test instead of ==.
                    if (bufferInfo.flags and BUFFER_FLAG_END_OF_STREAM != 0) {
                        mAudioExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC)
                    }
                }
            }
        } catch (e: Exception) {
            Log.e(TAG, "audio decode loop aborted", e)
        }
    }

    /**
     * Stops both decode loops and releases codecs, extractors and the audio sink.
     * Safe to call even if decoding already failed; individual release errors
     * are logged rather than rethrown.
     */
    fun release() {
        // FIX: the original never stopped the threads — signal the loops first.
        isFinishDecode = true
        try {
            mAudioDecoder.release()
            mAudioExtractor.release()
            mVideoDecoder.release()
            mVideoExtractor.release()
            // FIX: the AudioTrack was never released (native resource leak).
            audioTrack.release()
        } catch (e: Exception) {
            Log.e(TAG, "release failed", e)
        }
        mVideoExecutor.shutdown()
        mAudioExecutor.shutdown()
    }
}