package ai.tuobot.sdk.util

import ai.tuobot.sdk.model.AudioConfig
import ai.tuobot.vad.silero.Vad
import ai.tuobot.vad.silero.VadSilero
import ai.tuobot.vad.silero.config.FrameSize
import ai.tuobot.vad.silero.config.Mode
import ai.tuobot.vad.silero.config.SampleRate
import ai.tuobot.vad.yamnet.VadYamnet
import android.Manifest
import android.content.Context
import android.content.pm.PackageManager
import android.media.AudioFormat
import android.media.AudioRecord
import android.media.AudioTrack
import android.media.MediaCodec
import android.media.MediaRecorder
import android.media.audiofx.AcousticEchoCanceler
import android.media.audiofx.NoiseSuppressor
import android.util.Log
import android.util.TimeUtils
import androidx.core.app.ActivityCompat
//import com.arthenica.mobileffmpeg.Config
//import com.arthenica.mobileffmpeg.FFmpeg
import com.arthenica.ffmpegkit.FFmpegKitConfig
import com.arthenica.ffmpegkit.FFmpegKit
import com.arthenica.ffmpegkit.FFmpegSession
import com.arthenica.ffmpegkit.ReturnCode

import kotlinx.coroutines.CoroutineScope
import kotlinx.coroutines.Dispatchers
import kotlinx.coroutines.SupervisorJob
import java.io.File
import java.io.FileOutputStream
import java.nio.ByteBuffer
import java.nio.ByteOrder
import ai.tuobot.vad.yamnet.Vad as VadOfYamnet
import ai.tuobot.vad.yamnet.config.FrameSize as FrameSizeOfYamnet
import ai.tuobot.vad.yamnet.config.Mode as ModeOfYamnet
import ai.tuobot.vad.yamnet.config.SampleRate as SampleRateOfYamnet

/**
 * Continuously captures microphone audio, splits it into spoken segments, and delivers
 * each segment to the caller as an encoded audio file.
 *
 * Per segment the capture thread: reads PCM from [AudioRecord], optionally runs software
 * echo cancellation ([EchoCancellation]) against the far-end reference queue, detects
 * speech via Silero VAD (8 kHz path), Yamnet VAD (16 kHz path) or a plain amplitude
 * threshold, prepends ~0.8 s of pre-roll audio from a circular buffer, writes the raw
 * PCM to a file, then encodes it with FFmpegKit to the configured format
 * (webm/opus, mp3 or wav) and invokes [listenedFileCallB] with the result.
 *
 * Status codes emitted through [listeneStatus] (from the call sites below):
 *   0 = speech detected / listening, 1 = segment ended with no speech, 2 = segment done.
 *
 * @param context           used for the permission check, VAD model loading and file storage
 * @param audioConfig       runtime options: VAD on/off, sample rate, output format,
 *                          internal AEC, ANS, volume threshold, silence window
 * @param listeneStatus     callback receiving the integer status codes above
 * @param listenedFileCallB callback receiving each successfully encoded segment file
 */
internal class AudioRecorder(private val context: Context, private val audioConfig:AudioConfig, val listeneStatus: (status:Int) -> Unit, private val listenedFileCallB: (finishFile:File) -> Unit ){
    companion object {
        // Capture parameters for the internal (software) AEC path.
        private const val SAMPLE_RATE = 24000 // 24000
        // Samples per AEC processing frame: 24000 / 100 / 6 = 40 samples (~1.67 ms).
        private const val FRAME_SIZE = SAMPLE_RATE/100/6
        private const val CHANNEL_CONFIG = AudioFormat.CHANNEL_IN_MONO
        private const val AUDIO_FORMAT = AudioFormat.ENCODING_PCM_16BIT
        // NOTE(review): unused — the FFmpeg commands below hard-code "-b:a 48k" instead.
        private const val BIT_RATE = 48000
        //private val BUFFER_SIZE = AudioRecord.getMinBufferSize(SAMPLE_RATE, CHANNEL_CONFIG, AUDIO_FORMAT)

        // Silero VAD configuration (used when VAD is on and audioConfig.sampleRate != 16000).
        private val DEFAULT_SAMPLE_RATE = SampleRate.SAMPLE_RATE_8K
        private val DEFAULT_FRAME_SIZE = FrameSize.FRAME_SIZE_512
        private val DEFAULT_MODE = Mode.AGGRESSIVE

        // Yamnet VAD configuration (used when VAD is on and audioConfig.sampleRate == 16000).
        private val DEFAULT_SAMPLE_RATE_OF_YAMNET = SampleRateOfYamnet.SAMPLE_RATE_16K
        private val DEFAULT_FRAME_SIZEOF_YAMNET = FrameSizeOfYamnet.FRAME_SIZE_487
        private val DEFAULT_MODE_OF_YAMNET = ModeOfYamnet.AGGRESSIVE

        // NOTE(review): DEFAULT_SILENCE_DURATION_MS is unused — the effective silence window
        // comes from audioConfig.end_listen_silence_time_of_wakeup (see currentSilenceDuration).
        private val DEFAULT_SILENCE_DURATION_MS = 300
        private val DEFAULT_SPEECH_DURATION_MS = 1

    }
    // NOTE(review): scope is created but never used for launches (the only launch is commented out).
    private val coroutineScope = CoroutineScope(Dispatchers.IO + SupervisorJob())
    private lateinit var audioRecord: AudioRecord
    // NOTE(review): mediaCodec is declared but never initialized or used in this class.
    private lateinit var mediaCodec: MediaCodec
    private var isRecording = false // outer-loop flag: the whole recording session
    private var listening = true    // inner-loop flag: one speech segment
    private var TAG = "AudioRecorder"
    // Initial value uses the Silero (8 kHz) rate; recomputed in startRecording() per configuration.
    private var minBufferSize = AudioRecord.getMinBufferSize(DEFAULT_SAMPLE_RATE.value, CHANNEL_CONFIG, AUDIO_FORMAT)

    private lateinit var pcmFile: File // raw PCM for the current segment
    private lateinit var mp3File: File // encoded output for the current segment (despite the name, may be webm/mp3/wav)
    // NOTE(review): the platform AEC effect is created below but never released.
    private var echoCanceler: AcousticEchoCanceler? = null

    // Wall-clock time (ms) when the current run of silence started; 0L = not in silence.
    private var silenceStartTime = 0L

    private var vad: VadSilero? = null

    private var vadYamnet: VadYamnet? = null

    // When true the capture thread idles (sleeps) instead of reading/segmenting audio.
    private var mute = false

    // Software echo canceller used only when audioConfig.internalAEC is set.
    val echoCancellation = EchoCancellation()

    // Silence duration (ms) that ends a segment; adjustable at runtime via setSilentceTimeDyn().
    var currentSilenceDuration = audioConfig.end_listen_silence_time_of_wakeup

    // Pre-roll buffer: 0.8 s of 16-bit mono audio (2 bytes per sample) prepended to each segment
    // so the very start of an utterance is not lost while VAD is still deciding.
    val CIRCULAR_BUFFER_SIZE = (2 * audioConfig.sampleRate * 0.8).toInt()
    val circularBuffer = CircularBuffer(CIRCULAR_BUFFER_SIZE)
    /**
     * Starts the capture loop on a dedicated thread.
     *
     * Configures the buffer size, AudioRecord source/rate and VAD engine according to
     * [audioConfig], enables platform audio effects, then spawns the segmenting loop.
     *
     * @return false immediately if RECORD_AUDIO permission is not granted; true otherwise
     *         (all capture work then happens asynchronously on the spawned thread).
     */
    fun startRecording():Boolean {

        if (context.let {
                ActivityCompat.checkSelfPermission(
                    it,
                    Manifest.permission.RECORD_AUDIO
                )
            } != PackageManager.PERMISSION_GRANTED
        ) {
            return false
        }

        // Pick a read-buffer size matching the capture rate chosen below.
        if(audioConfig.internalAEC){
            minBufferSize = maxOf(AudioRecord.getMinBufferSize(
                SAMPLE_RATE,
                CHANNEL_CONFIG,
                AUDIO_FORMAT
            ),2048)
        }else{
            if ((audioConfig.VAD && audioConfig.sampleRate == 16000) || !audioConfig.VAD){
                // 16 kHz path: buffer must hold at least one Yamnet frame (2 bytes/sample).
                minBufferSize = maxOf(
                    AudioRecord.getMinBufferSize(
                        DEFAULT_SAMPLE_RATE_OF_YAMNET.value,
                        CHANNEL_CONFIG,
                        AUDIO_FORMAT
                    ),
                    2 * DEFAULT_FRAME_SIZEOF_YAMNET.value
                )
            }else{
                // Silero path: buffer must hold at least one Silero frame.
                // NOTE(review): getMinBufferSize here queries the Yamnet (16 kHz) rate even
                // though this branch records at DEFAULT_SAMPLE_RATE (8 kHz) — confirm intended.
                minBufferSize = maxOf(
                    AudioRecord.getMinBufferSize(
                        DEFAULT_SAMPLE_RATE_OF_YAMNET.value,
                        CHANNEL_CONFIG,
                        AUDIO_FORMAT
                    ),
                    2 * DEFAULT_FRAME_SIZE.value
                )
            }
        }

        if(audioConfig.internalAEC){
            // Initialize the software echo canceller for the 24 kHz internal-AEC path.
            echoCancellation.init(SAMPLE_RATE, FRAME_SIZE)

            audioRecord = AudioRecord(
                MediaRecorder.AudioSource.MIC,
                SAMPLE_RATE,
                CHANNEL_CONFIG,
                AUDIO_FORMAT,
                minBufferSize
            )
        }else{
            if ((audioConfig.VAD && audioConfig.sampleRate == 16000) || !audioConfig.VAD){
                audioRecord = AudioRecord(
                    MediaRecorder.AudioSource.VOICE_COMMUNICATION, // use the VOICE_COMMUNICATION (or VOICE_RECOGNITION) source
                    DEFAULT_SAMPLE_RATE_OF_YAMNET.value,
                    CHANNEL_CONFIG,
                    AUDIO_FORMAT,
                    minBufferSize
                )

            }else{
                audioRecord = AudioRecord(
                    MediaRecorder.AudioSource.VOICE_COMMUNICATION, // use the VOICE_COMMUNICATION (or VOICE_RECOGNITION) source
                    DEFAULT_SAMPLE_RATE.value,
                    CHANNEL_CONFIG,
                    AUDIO_FORMAT,
                    minBufferSize
                )
            }
        }

        // Build the VAD engine matching the capture rate.
        if (audioConfig.VAD && audioConfig.sampleRate == 16000){
            vadYamnet = VadOfYamnet.builder()
                .setContext(context)
                .setSampleRate(DEFAULT_SAMPLE_RATE_OF_YAMNET)
                .setFrameSize(DEFAULT_FRAME_SIZEOF_YAMNET)
                .setMode(DEFAULT_MODE_OF_YAMNET)
                .setSilenceDurationMs(audioConfig.end_listen_silence_time_of_wakeup)
                .setSpeechDurationMs(DEFAULT_SPEECH_DURATION_MS)
                .build()

        }else if (audioConfig.VAD){
            vad = Vad.builder()
                .setContext(context)
                .setSampleRate(DEFAULT_SAMPLE_RATE)
                .setFrameSize(DEFAULT_FRAME_SIZE)
                .setMode(DEFAULT_MODE)
                .setSilenceDurationMs(audioConfig.end_listen_silence_time_of_wakeup)
                .setSpeechDurationMs(DEFAULT_SPEECH_DURATION_MS)
                .build()

        }

        // NOTE(review): this enables NoiseSuppressor unconditionally, making the
        // audioConfig.ANS-gated block below redundant (a second suppressor is created
        // for the same session). Neither instance is kept or released.
        if (NoiseSuppressor.isAvailable()) {
            val noiseSuppressor = NoiseSuppressor.create(audioRecord.audioSessionId)
            noiseSuppressor.enabled = true
        }

        // Check for and enable the platform acoustic echo canceller.
        if (AcousticEchoCanceler.isAvailable()) {
            echoCanceler = AcousticEchoCanceler.create(audioRecord.audioSessionId)
            echoCanceler?.enabled = true
        }

        // Automatic gain control (disabled).
        /*if (AutomaticGainControl.isAvailable()) {
            val agc = AutomaticGainControl.create(audioRecord.audioSessionId)
            agc.enabled = true
        }*/

        // Enable noise suppression (see NOTE above — duplicates the unconditional block).
        if (NoiseSuppressor.isAvailable() && audioConfig.ANS) {
            val ns = NoiseSuppressor.create(audioRecord.audioSessionId)
            ns.enabled = true
        }

        isRecording = true
        audioRecord.startRecording()

        // Segment counter; output file names rotate through 8 slots (listentCount % 8).
        var listentCount = 0

        Thread {
            val buffer = ByteArray(minBufferSize)
            val shortBufferIn = ShortArray(minBufferSize / 2) // 16-bit PCM: two bytes per sample
            var shortBuffer = ShortArray(minBufferSize / 2)
            // Outer loop: one iteration per recorded segment, until stopRecording().
            do{
                if (mute){
                    Thread.sleep(50)
                    continue
                }
                // Reset per-segment state.
                shortBuffer.fill(0)
                listening = true
                silenceStartTime = 0L
                var noSay = true // stays true if no speech was ever detected this segment

                var fileName = "recorded_audio${listentCount%8}.webm"
                val pcmfileName = "recorded_audio${listentCount%8}.pcm"

                if (audioConfig.format == "mp3"){
                    fileName = "recorded_audio${listentCount%8}.mp3"
                }else if (audioConfig.format == "wav"){
                    fileName = "recorded_audio${listentCount%8}.wav"
                }

                mp3File = File(context.filesDir, fileName)
                pcmFile = File(context.filesDir, pcmfileName)
                var startSay = false      // becomes true at the first detected speech frame
                var writedPreAudio = false // pre-roll from circularBuffer written at most once per segment
                FileOutputStream(pcmFile).use { outputStream ->
                    // Inner loop: one iteration per audio read, until end-of-segment silence.
                    while (listening && !mute) {
                        val read = audioRecord.read(buffer, 0, minBufferSize)
                        // Always feed the pre-roll ring buffer, speech or not.
                        circularBuffer.write(buffer)
                        // Gain adjustment (disabled).
                        /*for (i in buffer.indices) {
                            buffer[i] = (buffer[i] * 1.2).toInt().toByte() // simple gain adjustment example
                        }*/
                        var isPlaying = false // true while a far-end reference frame exists (device is playing audio)
                        if (read > 0) {
                            if(audioConfig.internalAEC){
                                // Convert the ByteArray to a ShortArray for volume calculation.
                                ByteBuffer.wrap(buffer).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(shortBufferIn, 0, read / 2)
                                // Fetch the far-end reference signal from referenceQueue.
                                val referenceBuffer = AudioTrackPlayer.referenceQueue.poll()

                                if (referenceBuffer==null){
                                    //Log.d(TAG, "referenceBuffer==null")
                                    // Nothing playing: pass the mic signal through unchanged.
                                    shortBuffer = shortBufferIn.copyOf()
                                }else{
                                    isPlaying = true
                                    Log.d(TAG, "get referenceBuffer referenceQueue.size: ${AudioTrackPlayer.referenceQueue.size}")
                                    // Cancel the played-back audio out of the mic signal.
                                    echoCancellation.processAudio(shortBufferIn, referenceBuffer, shortBuffer)
                                }
                            }else{
                                // Convert the ByteArray to a ShortArray for volume calculation.
                                ByteBuffer.wrap(buffer).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(shortBuffer, 0, read / 2)
                            }

                            var isSpeeching = false
                            val isOverAmplitudeThread = overMaxAmplitudeThread(shortBuffer, read)

                            // Speech detection: VAD result AND amplitude threshold must both pass.
                            if (audioConfig.VAD){
                                try {
                                    if (audioConfig.sampleRate == 16000){
                                        val speech = "Speech"
                                        val soundCategory = vadYamnet?.classifyAudio(speech, shortBuffer)

                                        if (soundCategory?.label.equals(speech) && isOverAmplitudeThread) {
                                            isSpeeching = true
                                            Log.d(TAG, "vad_y: true")
                                        }
                                    }else{
                                        if (vad?.isSpeech(shortBuffer) == true && isOverAmplitudeThread) {
                                            isSpeeching = true
                                            Log.d(TAG, "vad_s: true")
                                        }
                                    }

                                }catch (e:IllegalStateException){
                                    e.printStackTrace()
                                }catch (e:Exception){
                                    // Best-effort: a VAD failure on one frame just treats it as non-speech.
                                    e.printStackTrace()
                                }
                            }else{
                                // No VAD: amplitude threshold alone decides.
                                isSpeeching = overMaxAmplitudeThread(shortBuffer, read)
                            }

                            // Silence tracking: once speech has started, a continuous silence
                            // longer than currentSilenceDuration ends the segment.
                            if (!isSpeeching) {
                                if (silenceStartTime <= 0L) {
                                    silenceStartTime = System.currentTimeMillis()
                                } else if (startSay && System.currentTimeMillis() - silenceStartTime > currentSilenceDuration) {
                                    listening = false
                                    break
                                }
                            } else if (System.currentTimeMillis() - silenceStartTime > 500){
                                //listeneStatus(0) //Listening...
                                silenceStartTime = 0L
                                noSay = false
                            }

                            if (isSpeeching && !isPlaying){
                                listeneStatus(0) //Listening...
                            }

                            if (isSpeeching && !startSay){
                                startSay = true
                            }


                            if (startSay) {
                                var cirBufferReadSize = 0
                                val dataToWriteTemp = ByteArray(CIRCULAR_BUFFER_SIZE)

                                // Drain the pre-roll buffer once, at the start of the segment.
                                if (!writedPreAudio){
                                    writedPreAudio = true
                                    cirBufferReadSize = circularBuffer.read(dataToWriteTemp, CIRCULAR_BUFFER_SIZE)
                                    Log.d(TAG, "cirBufferReadSize: $cirBufferReadSize")
                                }

                                if(!audioConfig.internalAEC){
                                    // Stitch the buffered pre-roll onto the segment. The last
                                    // buffer.size bytes of the pre-roll are skipped because the
                                    // current buffer was already written into circularBuffer above.
                                    if (cirBufferReadSize > 0 && cirBufferReadSize - buffer.size > 0){
                                        outputStream.write(dataToWriteTemp, 0, cirBufferReadSize - buffer.size)
                                        outputStream.write(buffer, 0, read)
                                    }else{
                                        outputStream.write(buffer, 0, read)
                                    }

                                }
                                else{
                                    // Internal-AEC path writes the echo-cancelled shortBuffer,
                                    // so it must be serialized back to little-endian bytes.
                                    val byteBuffer = ByteBuffer.allocate(shortBuffer.size * 2).apply {
                                        order(ByteOrder.LITTLE_ENDIAN) // ensure correct byte order
                                    }

                                    // Copy the ShortArray into the ByteBuffer.
                                    byteBuffer.asShortBuffer().put(shortBuffer)

                                    // Obtain the backing ByteArray.
                                    val byteArray = byteBuffer.array()

                                    // Make sure the written byteArray is complete audio data.
                                    if (cirBufferReadSize > 0 && cirBufferReadSize - byteArray.size > 0){
                                        Log.d(TAG, "cirBufferReadSize > 0")
                                        outputStream.write(dataToWriteTemp, 0, cirBufferReadSize - byteArray.size)
                                        outputStream.write(byteArray, 0, byteArray.size)
                                    }else{
                                        outputStream.write(byteArray, 0, byteArray.size)
                                    }

                                }

                            }

                        }

                    }

                    // NOTE(review): redundant — use {} already closes the stream on exit.
                    outputStream.close()

                    if (noSay){
                        //Log.e(TAG, "You say nothing")
                        listeneStatus(1) //You say nothing
                        /*coroutineScope.launch {
                            Thread.sleep(1000)
                            listeneStatus(2)
                        }*/
                    }
                    // Encode only segments that actually contained speech and were not
                    // interrupted by stop/mute.
                    if (!noSay && isRecording && !mute) {

                        //listeneStatus(2)
                        if (audioConfig.format == "mp3")
                            encodeToMp3(pcmfileName, fileName)
                        else if (audioConfig.format == "wav")
                            encodeToWav(pcmfileName, fileName)
                        else
                            encodeToWebm(pcmfileName, fileName)
                    }
                }
                listeneStatus(2)
                listentCount ++
            } while (isRecording)

            // Session teardown once stopRecording() flips isRecording.
            audioRecord.stop()
            audioRecord.release()
            listeneStatus(2)

            vad?.close()
            vad = null

            vadYamnet?.close()
            vadYamnet = null
            if(audioConfig.internalAEC)
                echoCancellation.cleanup()

        }.start()

        return true
    }

    /**
     * True when the frame's peak amplitude exceeds audioConfig.volume_threshold.
     * [read] is a byte count, so only the first read/2 samples are inspected.
     */
    private fun overMaxAmplitudeThread(
        shortBuffer: ShortArray,
        read: Int
    ): Boolean {
        val maxAmplitude = calculateMaxAmplitude(shortBuffer, read / 2)
        //Log.d(TAG, "maxAmplitude:${maxAmplitude}")
        return maxAmplitude > audioConfig.volume_threshold
    }

    /**
     * Example: naive first-difference high-pass filter.
     * A real implementation should use a proper filter design.
     *
     * NOTE(review): this is unused, and it operates on individual bytes rather than
     * 16-bit samples, so on PCM-16 data it would corrupt the audio — do not enable as-is.
     */
    fun applyHighPassFilter(audioData: ByteArray, sampleRate: Int): ByteArray {
        val filteredData = audioData.clone()
        for (i in 1 until filteredData.size) {
            filteredData[i] = (filteredData[i] - 0.95 * filteredData[i - 1]).toInt().toByte()
        }
        return filteredData
    }

    /** Encodes a raw s16le mono PCM file to MP3 (libmp3lame, 48 kbps) via FFmpegKit. */
    private fun encodeToMp3(pcmfileName: String, fileName: String) {
        val pcmFile = File(context.filesDir, pcmfileName)
        val encodeFile = File(context.filesDir, fileName)

        // Input sample rate must match whichever rate the capture path used.
        val command = arrayOf(
            "-y", // overwrite an existing output file
            "-f", "s16le",
            "-ar", if(audioConfig.internalAEC) SAMPLE_RATE.toString() else if(audioConfig.sampleRate == 16000) DEFAULT_SAMPLE_RATE_OF_YAMNET.value.toString() else DEFAULT_SAMPLE_RATE.value.toString(),
            "-ac", "1",
            "-i", pcmFile.absolutePath,
            "-codec:a", "libmp3lame",
            "-b:a", "48k", // fixed bitrate //"-qscale:a", "2",
            encodeFile.absolutePath
        ).joinToString(" ")

        doEncode(command, fileName, encodeFile)
    }

    /** Encodes a raw s16le mono PCM file to WebM/Opus (48 kbps) via FFmpegKit. */
    private fun encodeToWebm(pcmfileName: String, fileName: String) {
        val pcmFile = File(context.filesDir, pcmfileName)
        val encodeFile = File(context.filesDir, fileName)

        val command = arrayOf(
            "-y", // overwrite an existing output file
            "-f", "s16le",
            "-ar", if(audioConfig.internalAEC) SAMPLE_RATE.toString() else if(audioConfig.sampleRate == 16000) DEFAULT_SAMPLE_RATE_OF_YAMNET.value.toString() else DEFAULT_SAMPLE_RATE.value.toString(),
            "-ac", "1",
            "-i", pcmFile.absolutePath,
            "-c:a", "libopus",
            "-b:a", "48k", // fixed bitrate
            encodeFile.absolutePath
        ).joinToString(" ")

        doEncode(command, fileName, encodeFile)
    }

    /** NOTE(review): dead code — identical to [encodeToWebm] and never called; candidate for removal. */
    private fun encodeToWebm2(pcmfileName: String, fileName: String) {
        val pcmFile = File(context.filesDir, pcmfileName)
        val encodeFile = File(context.filesDir, fileName)

        val command = arrayOf(
            "-y", // overwrite an existing output file
            "-f", "s16le",
            "-ar", if(audioConfig.internalAEC) SAMPLE_RATE.toString() else if(audioConfig.sampleRate == 16000) DEFAULT_SAMPLE_RATE_OF_YAMNET.value.toString() else DEFAULT_SAMPLE_RATE.value.toString(),
            "-ac", "1",
            "-i", pcmFile.absolutePath,
            "-c:a", "libopus",
            "-b:a", "48k", // fixed bitrate
            encodeFile.absolutePath
        ).joinToString(" ")

        doEncode(command, fileName, encodeFile)
    }

    // Legacy mobile-ffmpeg implementation, kept disabled after the migration to FFmpegKit.
    /*private fun encodeToWav(pcmFileName: String, wavFileName: String) {
        val pcmFile = File(context.filesDir, pcmFileName)
        val wavFile = File(context.filesDir, wavFileName)

        val command = arrayOf(
            "-y", // overwrite an existing output file
            "-f", "s16le", // PCM format
            "-ar", if(audioConfig.internalAEC) SAMPLE_RATE.toString() else if (audioConfig.sampleRate == 16000) DEFAULT_SAMPLE_RATE_OF_YAMNET.value.toString() else DEFAULT_SAMPLE_RATE.value.toString(),
            "-ac", "1", // mono
            "-i", pcmFile.absolutePath, // input file
            "-c:a", "pcm_s16le", // output format: WAV
            wavFile.absolutePath // output file path
        )

        doEncode(command, wavFileName, wavFile)
    }

    private fun doEncode(
        command: Array<String>,
        fileName: String,
        encodeFile: File
    ) {
        FFmpeg.executeAsync(command) { executionId, returnCode ->
            if (returnCode == 0) {
                //Log.d(TAG, "conversion succeeded, file path: ${mp3File.absolutePath}")
                Log.d(TAG, "listenedFileCallB ${fileName}")
                listenedFileCallB(encodeFile)

            } else {
                //Log.e(TAG, "conversion failed, error code: $returnCode")
                val output = Config.getLastCommandOutput()
                Log.d(TAG, "FFmpeg output: $output")
            }
        }
    }*/

    /** Encodes a raw s16le mono PCM file to WAV (pcm_s16le) via FFmpegKit. */
    private fun encodeToWav(pcmFileName: String, wavFileName: String) {
        val pcmFile = File(context.filesDir, pcmFileName)
        val wavFile = File(context.filesDir, wavFileName)

        // Join the FFmpeg command array into a single string.
        val command = listOf(
            "-y", // overwrite an existing output file
            "-f", "s16le", // PCM format
            "-ar", if (audioConfig.internalAEC) SAMPLE_RATE.toString()
            else if (audioConfig.sampleRate == 16000) DEFAULT_SAMPLE_RATE_OF_YAMNET.value.toString()
            else DEFAULT_SAMPLE_RATE.value.toString(),
            "-ac", "1", // mono
            "-i", pcmFile.absolutePath, // input file
            "-c:a", "pcm_s16le", // output format: WAV
            "-b:a", "48k", // fixed bitrate (no-op for uncompressed PCM)
            wavFile.absolutePath // output file path
        ).joinToString(" ") // join the array into a space-separated string

        doEncode(command, wavFileName, wavFile)
    }

    /**
     * Runs an FFmpegKit command asynchronously; on success delivers [encodeFile]
     * through [listenedFileCallB], on failure only logs the return code.
     */
    private fun doEncode(
        command: String,
        fileName: String,
        encodeFile: File
    ) {
        FFmpegKit.executeAsync(command) { session: FFmpegSession ->
            val returnCode = session.returnCode
            if (ReturnCode.isSuccess(returnCode)) {
                //Log.d(TAG, "conversion succeeded, file path: ${encodeFile.absolutePath}")
                listenedFileCallB(encodeFile)
                //Log.d(TAG, "listenedFileCallB ${fileName}")
            } else {
                Log.e(TAG, "FFmpeg convert failed with return code: $returnCode")
            }
        }
    }

    /** Returns the peak absolute sample value among the first [size] samples of [buffer]. */
    private fun calculateMaxAmplitude(buffer: ShortArray, size: Int): Int {
        var maxAmplitude = 0
        for (i in 0 until size) {
            val amplitude = Math.abs(buffer[i].toInt())
            if (amplitude > maxAmplitude) {
                maxAmplitude = amplitude
            }
        }
        return maxAmplitude
    }

    /**
     * Dynamically changes the end-of-segment silence window (ms), both for the
     * local silence timer and for whichever VAD engine is active.
     */
    fun setSilentceTimeDyn(time:Int){
        Log.d(TAG,"setSilentceTimeDyn:$time")
        currentSilenceDuration = time
        vad?.let {
            it.silenceDurationMs = time
        }
        vadYamnet?.let {
            it.silenceDurationMs = time
        }
    }

    /** Signals the capture thread to end the current segment and exit; teardown happens on that thread. */
    fun stopRecording() {
        isRecording = false
        listening = false
    }

    /** Pauses (true) or resumes (false) audio capture without tearing down the session. */
    fun setMute(mute: Boolean) {
        this.mute = mute
    }
}
