package com.xabber.myapplication;

import android.content.Context;
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;
import android.media.MediaRecorder;
import android.os.Build;
import android.util.Log;


import androidx.annotation.RequiresApi;

import com.webrtc.ns.WebRTCNoiseSuppression;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.ByteOrder;

public class AudioRecordManager {
    // volatile is required for correct double-checked locking: without it, a second
    // thread may observe a non-null but partially constructed INSTANCE.
    private static volatile AudioRecordManager INSTANCE;
    // Audio capture source.
    private int audioSource = MediaRecorder.AudioSource.MIC;
    // Sample rate: 44100 Hz is the universally supported standard; some devices
    // additionally support 22050, 16000 and 11025.
    private int sampleRateInHz = 44100;
    // Recording channel configuration (mono).
    private int channelConfig = AudioFormat.CHANNEL_IN_MONO;
    // PCM 16-bit per sample is guaranteed by the platform; 8-bit is not.
    private int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    // Minimum AudioRecord buffer size, in bytes.
    private int bufferSizeInBytes;
    // volatile: written by the caller thread (stopRecord) and polled by the encode
    // thread; without it the encode loop might never see the stop request.
    private volatile boolean isRecord = false;
    // Output directory for recorded files.
    private File file;
    private AudioRecord audioRecord;
    private MediaCodec mAudioEncoder;
    private FileOutputStream fos;
    // Absolute path of the file currently (or most recently) written.
    private String nowpath;
    private MediaFormat encodeFormat;

    private String tag = "AudioRecordManager";


    /**
     * Returns the process-wide singleton, creating it on first use.
     *
     * @param context used only during first construction (files dir, codec init);
     *                the instance does not retain it.
     */
    public static AudioRecordManager getInstance(Context context) {
        if (INSTANCE == null) {
            synchronized (AudioRecordManager.class) {
                if (INSTANCE == null) {
                    INSTANCE = new AudioRecordManager(context);
                }
            }
        }
        return INSTANCE;
    }


    private AudioRecordManager(Context context) {
        initAudioRecord();
        initMediaCodec(context);
        file = new File(context.getFilesDir(), "yb_audio");
        if (!file.exists()) {
            // mkdirs() rather than mkdir(): succeeds even if parents are missing.
            file.mkdirs();
        }
    }

    /** Creates the AudioRecord with the minimum buffer size for the configured format. */
    private void initAudioRecord() {
        bufferSizeInBytes = AudioRecord.getMinBufferSize(sampleRateInHz,
                channelConfig, audioFormat);
        audioRecord = new AudioRecord(audioSource, sampleRateInHz,
                channelConfig, audioFormat, bufferSizeInBytes);
        if (audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
            Log.e(tag, "initAudioRecord_fail");
        }
    }


    /**
     * Builds the AAC-LC encoder format: 44.1 kHz mono. These values must stay in
     * sync with the ADTS header written by {@link #addADTStoPacket(byte[], int)}.
     */
    private void initMediaCodec(Context context) {
        encodeFormat = MediaFormat.createAudioFormat(MediaFormat.MIMETYPE_AUDIO_AAC, 44100, 1);
        encodeFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
        encodeFormat.setInteger(MediaFormat.KEY_BIT_RATE, 44100 * 16);
        encodeFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 100 * 1024);
    }


    /**
     * Converts 16-bit PCM samples to a little-endian byte array.
     *
     * @param data the samples to convert
     * @param item size in bytes of the returned array; callers pass
     *             {@code data.length * 2} so every sample fits
     * @return {@code item} bytes with the samples in little-endian order
     */
    public byte[] shortTobyte(short[] data, int item) {
        byte[] da = new byte[item];
        ByteBuffer.wrap(da).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().put(data);
        return da;
    }


    /**
     * Stops an in-progress recording and releases the encoder.
     *
     * <p>NOTE(review): the encode thread may still be inside an iteration that uses
     * {@code mAudioEncoder} when stop()/release() run here — this is racy in the
     * original design; a clean fix would have the encode thread release the codec
     * after its loop exits. Confirm with callers before restructuring.
     *
     * @return absolute path of the recorded file (last path if not recording)
     */
    public String stopRecord() {
        if (isRecord) {
            isRecord = false;
            if (mAudioEncoder != null) {
                mAudioEncoder.stop();
                mAudioEncoder.release();
            }

        }
        return nowpath;
    }

    /** Creates/configures the AAC encoder and starts the background write thread. */
    public void startRecord() {
        try {
            mAudioEncoder = MediaCodec.createEncoderByType(MediaFormat.MIMETYPE_AUDIO_AAC);
            mAudioEncoder.configure(encodeFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        } catch (IOException e) {
            e.printStackTrace();
            // Encoder creation failed: starting the thread anyway would NPE on
            // mAudioEncoder, so bail out without flipping the recording flag.
            return;
        }
        isRecord = true;
        // Background thread: reads PCM, encodes to AAC, writes ADTS frames to disk.
        new Thread(new Runnable() {

            @RequiresApi(api = Build.VERSION_CODES.M)
            @Override
            public void run() {
                writeDateTOFile();
            }
        }).start();
    }


    /**
     * Capture/encode loop. Reads raw PCM from the AudioRecord (raw PCM alone is not
     * playable), runs it through WebRTC noise suppression, feeds it to the AAC
     * encoder, and writes each encoded frame to disk prefixed with a 7-byte ADTS
     * header so the resulting .aac file is playable.
     */
    @RequiresApi(api = Build.VERSION_CODES.M)
    private void writeDateTOFile() {
        // Reusable capture buffer sized to the AudioRecord buffer (shorts = bytes / 2).
        short[] audiodata = new short[bufferSizeInBytes / 2];
        int readsize;
        try {
            nowpath = file.getAbsolutePath() + "/" + System.currentTimeMillis() + ".aac";
            fos = new FileOutputStream(nowpath);
            audioRecord.startRecording();
            mAudioEncoder.start();

            while (isRecord) {
                readsize = audioRecord.read(audiodata, 0, bufferSizeInBytes / 2);
                if (AudioRecord.ERROR_INVALID_OPERATION != readsize) {
                    // Feed one buffer of (denoised) PCM into the encoder, if an
                    // input buffer is available right now (timeout 0 = don't wait).
                    int inputBufferIndex = mAudioEncoder.dequeueInputBuffer(0);

                    if (inputBufferIndex >= 0) {

                        ByteBuffer inputBuffer = mAudioEncoder.getInputBuffer(inputBufferIndex);
                        inputBuffer.clear();
                        // Noise suppression; mode 3 is presumably the aggressiveness
                        // level — confirm against WebRTCNoiseSuppression docs.
                        short[] shotdata = WebRTCNoiseSuppression.processpcm(audiodata, sampleRateInHz, readsize, 3);
                        byte[] dat = shortTobyte(shotdata, shotdata.length * 2);
                        inputBuffer.put(dat);
                        inputBuffer.limit(dat.length);
                        mAudioEncoder.queueInputBuffer(inputBufferIndex, 0, dat.length, 0, 0);
                    }
                    // Drain every encoded frame the codec has ready, writing each
                    // as an ADTS-framed packet.
                    MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
                    int outputBufferIndex = mAudioEncoder.dequeueOutputBuffer(bufferInfo, 0);
                    while (outputBufferIndex >= 0) {
                        ByteBuffer outputBuffer = mAudioEncoder.getOutputBuffer(outputBufferIndex);
                        outputBuffer.position(bufferInfo.offset);
                        outputBuffer.limit(bufferInfo.offset + bufferInfo.size);
                        byte[] chunkAudio = new byte[bufferInfo.size + 7]; // 7 = ADTS header size
                        addADTStoPacket(chunkAudio, chunkAudio.length);
                        outputBuffer.get(chunkAudio, 7, bufferInfo.size);
                        outputBuffer.position(bufferInfo.offset);
                        fos.write(chunkAudio);
                        mAudioEncoder.releaseOutputBuffer(outputBufferIndex, false);
                        outputBufferIndex = mAudioEncoder.dequeueOutputBuffer(bufferInfo, 0);
                    }

                }
            }
            if (audioRecord != null) {
                audioRecord.stop();
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {

            try {
                if (fos != null) {
                    fos.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            } finally {
                fos = null;
            }
        }
    }


    /**
     * Writes a 7-byte ADTS header into the start of {@code packet}.
     *
     * <p>Hard-coded to match the encoder configuration in {@link #initMediaCodec}:
     * AAC-LC profile, 44.1 kHz (frequency index 4), 1 channel. 0xFFF9 sync word
     * marks MPEG-2 framing with no CRC.
     *
     * @param packet    destination; must be at least 7 bytes
     * @param packetLen total frame length including the 7 header bytes
     */
    private void addADTStoPacket(byte[] packet, int packetLen) {
        int profile = 2;  // AAC LC
        int freqIdx = 4;  // 44.1 kHz
        int chanCfg = 1;  // mono
        // fill in ADTS data
        packet[0] = (byte) 0xFF;
        packet[1] = (byte) 0xF9;
        packet[2] = (byte) (((profile - 1) << 6) + (freqIdx << 2) + (chanCfg >> 2));
        packet[3] = (byte) (((chanCfg & 3) << 6) + (packetLen >> 11));
        packet[4] = (byte) ((packetLen & 0x7FF) >> 3);
        packet[5] = (byte) (((packetLen & 7) << 5) + 0x1F);
        packet[6] = (byte) 0xFC;
    }


}
