package com.lkdont.android.media.camera

import android.graphics.SurfaceTexture
import android.media.AudioFormat
import android.media.MediaFormat
import android.media.MediaMuxer
import android.media.MediaRecorder
import android.opengl.EGLContext
import android.os.Handler
import android.os.HandlerThread
import android.os.Looper
import android.os.Message
import android.util.Log
import com.lkdont.android.media.MediaEncoder
import com.lkdont.android.media.audio.recorder.PcmRecorder
import com.lkdont.android.media.opengl.DataInputSurface
import java.io.File

/**
 * 摄像头录制器
 *
 * @author lkdont
 */
class CameraVideoRecorder {

    companion object {
        private const val TAG = "CameraVideoRecorder"

        // Audio capture defaults: mono, 16-bit PCM at 44.1 kHz, AAC-encoded at 64 kbps.
        private const val DEFAULT_AUDIO_SOURCE = MediaRecorder.AudioSource.DEFAULT
        private const val DEFAULT_AUDIO_SAMPLE_RATE = 44100
        private const val DEFAULT_AUDIO_CHANNEL = AudioFormat.CHANNEL_IN_MONO
        private const val DEFAULT_AUDIO_CHANNEL_COUNT = 1
        private const val DEFAULT_AUDIO_FORMAT = AudioFormat.ENCODING_PCM_16BIT
        private const val DEFAULT_AUDIO_BITRATE = 64 * 1000
    }

    // Written by startRecording()/stopRecording() and read by callers on other
    // threads (e.g. the GL thread driving inputVideoFrame). @Volatile guarantees
    // cross-thread visibility of the flag.
    @Volatile
    private var recording = false

    /** Whether the recorder is currently recording. */
    fun isRecording(): Boolean {
        return recording
    }

    private var threadLooper: Looper? = null
    private var recordHandler: RecordHandler? = null

    /**
     * Starts recording with the given [config].
     *
     * Spins up a dedicated [HandlerThread] so that encoding work never blocks
     * the caller, then posts the start command to it. Calling this while a
     * recording is already in progress logs a warning and does nothing.
     */
    fun startRecording(config: Config) {
        if (recording) {
            Log.w(TAG, "startRecording: 不能重复启动录制器")
            return
        }
        // Dedicated worker thread; audio-urgent priority keeps PCM delivery timely.
        val thread = HandlerThread(
            "CameraVideoRecorder",
            android.os.Process.THREAD_PRIORITY_URGENT_AUDIO
        )
        thread.start()
        val handler = RecordHandler(thread.looper)
        threadLooper = thread.looper
        recordHandler = handler
        // Kick off recording on the worker thread.
        handler.postStartRecording(config)
        // Mark the recorder as active.
        recording = true
    }

    /** Sets the OpenGL texture id of the [SurfaceTexture] carrying preview frames. */
    fun setTextureId(id: Int) {
        recordHandler?.postSetTextureId(id)
    }

    /**
     * Updates the shared EGL context used for rendering into the encoder surface.
     * No-op when recording has not been started (the handler does not exist yet).
     */
    fun updateSharedContext(context: EGLContext) {
        // Safe call instead of `!!`: calling this before startRecording() must
        // not crash the app.
        recordHandler?.postUpdateSharedContext(context)
    }

    /**
     * Stops recording, tears down the worker thread, and clears internal state.
     * Calling this without a matching [startRecording] is a harmless no-op
     * (the previous implementation would throw an NPE here).
     */
    fun stopRecording() {
        // Flip the flag first so isRecording() reports false immediately.
        recording = false
        recordHandler?.postStopRecording()
        threadLooper = null
        recordHandler = null
    }

    // Scratch buffer for reading the texture transform matrix. Its contents are
    // copied before crossing the thread boundary — see inputVideoFrame().
    private val transform = FloatArray(16)

    /**
     * Queues one video frame for encoding, capturing the [SurfaceTexture]'s
     * current timestamp and transform matrix. Frames with a non-positive
     * timestamp are rejected with a warning.
     */
    fun inputVideoFrame(surfaceTexture: SurfaceTexture) {
        // Capture into a local so the null check and the use cannot race with
        // stopRecording() clearing the field in between.
        val handler = recordHandler ?: return
        val timestamp = surfaceTexture.timestamp
        if (timestamp <= 0) {
            Log.w(TAG, "inputVideoFrame: timestamp($timestamp)不能小于等于0")
            return
        }
        surfaceTexture.getTransformMatrix(transform)
        // copyOf() is required: the scratch array is overwritten on the next
        // call while the worker thread may still be reading the previous
        // frame's matrix. Sharing the array directly is a data race.
        handler.postInputVideoFrame(
            VideoFrame(
                timestamp,
                transform.copyOf()
            )
        )
    }

    /**
     * Recording parameters.
     *
     * @property outputPath   absolute path of the output MP4 file
     * @property videoWidth   encoded video width in pixels
     * @property videoHeight  encoded video height in pixels
     * @property videoBitrate target video bitrate in bits per second
     */
    data class Config(
        val outputPath: String,
        val videoWidth: Int,
        val videoHeight: Int,
        val videoBitrate: Int
    )

    /**
     * One video frame: a timestamp (nanoseconds, from [SurfaceTexture.getTimestamp])
     * plus the 4x4 texture transform matrix. equals/hashCode are overridden
     * manually because the array property needs content (not reference) equality.
     */
    data class VideoFrame(
        val timestamp: Long,
        val transform: FloatArray
    ) {
        override fun equals(other: Any?): Boolean {
            if (this === other) return true
            if (javaClass != other?.javaClass) return false

            other as VideoFrame

            if (timestamp != other.timestamp) return false
            if (!transform.contentEquals(other.transform)) return false

            return true
        }

        override fun hashCode(): Int {
            var result = timestamp.hashCode()
            result = 31 * result + transform.contentHashCode()
            return result
        }
    }

    /**
     * One audio frame: a PCM byte buffer and the number of valid bytes in it.
     * equals/hashCode are overridden manually for array content equality.
     */
    data class AudioFrame(
        val data: ByteArray,
        val length: Int
    ) {
        override fun equals(other: Any?): Boolean {
            if (this === other) return true
            if (javaClass != other?.javaClass) return false

            other as AudioFrame

            if (!data.contentEquals(other.data)) return false
            if (length != other.length) return false

            return true
        }

        override fun hashCode(): Int {
            var result = data.contentHashCode()
            result = 31 * result + length
            return result
        }
    }

    /**
     * Worker-thread handler. All encoder / audio-recorder state below is owned
     * by the looper thread and must only be touched from [handleMessage].
     */
    private class RecordHandler(looper: Looper) : Handler(looper) {

        companion object {
            private const val MSG_START_RECORDING = 1
            private const val MSG_STOP_RECORDING = 2
            private const val MSG_AUDIO_FRAME_AVAILABLE = 3
            private const val MSG_VIDEO_FRAME_AVAILABLE = 4
            private const val MSG_SET_TEXTURE_ID = 5
            private const val MSG_UPDATE_SHARED_CONTEXT = 6
            private const val MSG_QUIT = 7
        }

        /** Posts the start-recording command with its [Config]. */
        fun postStartRecording(config: Config) {
            val msg = obtainMessage(MSG_START_RECORDING)
            msg.obj = config
            sendMessage(msg)
        }

        /** Posts stop followed by quit; the looper exits after teardown completes. */
        fun postStopRecording() {
            sendEmptyMessage(MSG_STOP_RECORDING)
            sendEmptyMessage(MSG_QUIT)
        }

        /** Posts one video frame for encoding. */
        fun postInputVideoFrame(frame: VideoFrame) {
            val msg = obtainMessage(MSG_VIDEO_FRAME_AVAILABLE)
            msg.obj = frame
            sendMessage(msg)
        }

        /** Posts one audio frame for encoding (called from the PCM callback). */
        private fun postInputAudioFrame(frame: AudioFrame) {
            val msg = obtainMessage(MSG_AUDIO_FRAME_AVAILABLE)
            msg.obj = frame
            sendMessage(msg)
        }

        /** Posts the OpenGL texture id for the input surface. */
        fun postSetTextureId(id: Int) {
            val msg = obtainMessage(MSG_SET_TEXTURE_ID)
            msg.arg1 = id
            sendMessage(msg)
        }

        /** Posts a new shared EGL context for the input surface. */
        fun postUpdateSharedContext(eglContext: EGLContext) {
            val msg = obtainMessage(MSG_UPDATE_SHARED_CONTEXT)
            msg.obj = eglContext
            sendMessage(msg)
        }

        // Initialized by MSG_START_RECORDING, which is always the first message
        // posted after this handler is created (messages are handled FIFO).
        private lateinit var videoEncoder: MediaEncoder
        private lateinit var dataInputSurface: DataInputSurface

        // Timestamp (ns) of the first video frame encoded after the muxer started.
        private var videoFrameStartPts = 0L

        /**
         * Returns the presentation time (nanoseconds) for a video frame.
         *
         * Video frames start arriving before audio is ready, so the clock only
         * starts once the muxer is ready; earlier frames are pinned to pts 0.
         */
        private fun getVideoPresentationTime(currentTime: Long): Long {
            if (!videoEncoder.isMediaMuxerReady) {
                return 0
            }
            if (videoFrameStartPts <= 0) {
                videoFrameStartPts = currentTime
            }
            return currentTime - videoFrameStartPts
        }

        // PCM audio recorder and the running presentation clock for audio (µs).
        private lateinit var audioRecorder: PcmRecorder
        private var nextAudioPtsUs = 0L

        /**
         * Returns the presentation time (microseconds) for an audio frame of
         * [frameLen] bytes and advances the audio clock by the frame's duration.
         * Assumes 16-bit PCM (2 bytes per sample) at the default sample rate
         * and channel count.
         */
        private fun getAudioPresentationTime(frameLen: Int): Long {
            val time = nextAudioPtsUs
            // duration_us = bytes / (sampleRate * channels * bytesPerSample) * 1e6
            nextAudioPtsUs += ((frameLen * 1000000L) / (DEFAULT_AUDIO_SAMPLE_RATE * DEFAULT_AUDIO_CHANNEL_COUNT * 2))
            return time
        }

        /** Creates (or resets) the PCM recorder, wires its callback, and starts it. */
        private fun prepareAudioRecorder() {
            // Reuse the existing recorder instance unless it was released.
            if (!this::audioRecorder.isInitialized || audioRecorder.isReleased) {
                audioRecorder =
                    PcmRecorder()
            } else {
                audioRecorder.reset()
            }
            audioRecorder.setConfig(
                DEFAULT_AUDIO_SOURCE,
                DEFAULT_AUDIO_SAMPLE_RATE,
                DEFAULT_AUDIO_CHANNEL,
                DEFAULT_AUDIO_FORMAT
            )
            audioRecorder.prepare()
            audioRecorder.onAudioDataAvailable = { _, data, length ->
                // PCM capture callback: forward each chunk to the worker thread.
                postInputAudioFrame(AudioFrame(data, length))
            }
            audioRecorder.start()
        }

        /** Stops the PCM recorder if it exists and has not been released. */
        private fun stopAudioRecorder() {
            if (this::audioRecorder.isInitialized && !audioRecorder.isReleased) {
                audioRecorder.stop()
            }
        }

        /** Creates and starts the A/V encoder and the GL input surface. */
        private fun prepareEncoder(config: Config) {
            videoEncoder = MediaEncoder()
            videoEncoder.start(
                MediaEncoder.Config.Builder(
                    config.outputPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4
                )
                    // Audio track: AAC with the class-wide capture defaults.
                    .audioFormat(MediaFormat.MIMETYPE_AUDIO_AAC)
                    .audioSampleRate(DEFAULT_AUDIO_SAMPLE_RATE)
                    .audioChannelCount(DEFAULT_AUDIO_CHANNEL_COUNT)
                    .audioBitrate(DEFAULT_AUDIO_BITRATE)
                    // Video track: H.264 with caller-provided geometry/bitrate.
                    .videoFormat(MediaFormat.MIMETYPE_VIDEO_AVC)
                    .videoWidth(config.videoWidth)
                    .videoHeight(config.videoHeight)
                    .videoBitrate(config.videoBitrate)
                    .videoFrameRate(30)
                    .videoIFrameInterval(5)
                    .build()
            )
            // GL surface that feeds frames into the encoder's input surface.
            dataInputSurface = DataInputSurface(videoEncoder.videoInputSurface!!)
        }

        /** Drains both encoders with end-of-stream and stops the encoder. */
        private fun stopEncoder() {
            videoEncoder.drainAudioEncoder(true)
            videoEncoder.drainVideoEncoder(true)
            videoEncoder.stop()
        }

        /** Releases the GL surface and the audio recorder. */
        private fun release() {
            dataInputSurface.release()
            if (this::audioRecorder.isInitialized && !audioRecorder.isReleased) {
                audioRecorder.release()
            }
        }

        /** Drains pending encoder output, then renders the frame at its pts. */
        private fun handleVideoFrame(frame: VideoFrame) {
            videoEncoder.drainVideoEncoder(false)
            val pts = getVideoPresentationTime(frame.timestamp)
            dataInputSurface.draw(pts, frame.transform)
        }

        /** Writes PCM data at its pts, then drains pending encoder output. */
        private fun handleAudioFrame(frame: AudioFrame) {
            val pts = getAudioPresentationTime(frame.length)
            videoEncoder.writeAudioData(pts, frame.data, frame.length)
            videoEncoder.drainAudioEncoder(false)
        }

        /** Applies a new shared EGL context to the input surface. */
        private fun updateSharedContext(context: EGLContext) {
            dataInputSurface.setEGLContext(context)
        }

        override fun handleMessage(msg: Message) {
            super.handleMessage(msg)
            when (msg.what) {
                MSG_SET_TEXTURE_ID -> {
                    // Safe: MSG_START_RECORDING always precedes this message,
                    // so dataInputSurface is initialized.
                    dataInputSurface.textureId = msg.arg1
                }
                MSG_UPDATE_SHARED_CONTEXT -> {
                    updateSharedContext(msg.obj as EGLContext)
                }
                MSG_START_RECORDING -> {
                    val config = msg.obj as Config
                    // Remove any stale output file from a previous run.
                    val file = File(config.outputPath)
                    if (file.exists() && !file.delete()) {
                        Log.w(TAG, "handleMessage: 检测到有旧的录制文件，但是删除失败")
                    }
                    prepareAudioRecorder()
                    prepareEncoder(config)
                }
                MSG_STOP_RECORDING -> {
                    stopAudioRecorder()
                    stopEncoder()
                    release()
                    // Reset the presentation clocks for a potential next session.
                    nextAudioPtsUs = 0
                    videoFrameStartPts = 0
                }
                MSG_QUIT -> {
                    // Exit the worker thread's looper; queued-after messages die here.
                    looper.quit()
                }
                MSG_VIDEO_FRAME_AVAILABLE -> {
                    handleVideoFrame(msg.obj as VideoFrame)
                }
                MSG_AUDIO_FRAME_AVAILABLE -> {
                    handleAudioFrame(msg.obj as AudioFrame)
                }
            }
        }
    }
}