/*
 *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

package org.webrtc.audio;

import android.annotation.TargetApi;
import android.content.Context;
import android.media.AudioDeviceInfo;
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioRecord;
import android.media.AudioRecordingConfiguration;
import android.media.MediaRecorder.AudioSource;
import android.os.Build;
import android.os.Process;
import android.util.Log;

import org.webrtc.CalledByNative;
import org.webrtc.Logging;
import org.webrtc.ThreadUtils;
import org.webrtc.audio.JavaAudioDeviceModule.AudioRecordErrorCallback;
import org.webrtc.audio.JavaAudioDeviceModule.AudioRecordStartErrorCode;
import org.webrtc.audio.JavaAudioDeviceModule.AudioRecordStateCallback;
import org.webrtc.audio.JavaAudioDeviceModule.SamplesReadyCallback;

import java.nio.ByteBuffer;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

import androidx.annotation.Nullable;
import androidx.annotation.RequiresApi;

class WebRtcAudioRecord {
    private static final String TAG = "WebRtcAudioRecordExternal";

    // Requested size of each recorded buffer provided to the client.
    private static final int CALLBACK_BUFFER_SIZE_MS = 10;

    // Average number of callbacks per second.
    private static final int BUFFERS_PER_SECOND = 1000 / CALLBACK_BUFFER_SIZE_MS;

    // We ask for a native buffer size of BUFFER_SIZE_FACTOR * (minimum required
    // buffer size). The extra space is allocated to guard against glitches under
    // high load.
    private static final int BUFFER_SIZE_FACTOR = 2;

    // The AudioRecordJavaThread is allowed to wait for a successful call to join()
    // but the wait times out after this amount of time.
    private static final long AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS = 2000;

    // Default audio source used when none is supplied by the caller.
    public static final int DEFAULT_AUDIO_SOURCE = AudioSource.VOICE_COMMUNICATION;

    // Default audio data format is PCM 16 bit per sample.
    // Guaranteed to be supported by all devices.
    public static final int DEFAULT_AUDIO_FORMAT = AudioFormat.ENCODING_PCM_16BIT;

    // Indicates AudioRecord has started recording audio.
    private static final int AUDIO_RECORD_START = 0;

    // Indicates AudioRecord has stopped recording audio.
    private static final int AUDIO_RECORD_STOP = 1;


    private final Context context;
    private final AudioManager audioManager;
    // Requested MediaRecorder.AudioSource value and AudioFormat encoding for recording.
    private final int audioSource;
    private final int audioFormat;

    // Handle to the native peer object; set from native code via setNativeAudioRecord()
    // and passed back on every native call.
    private long nativeAudioRecord;

    // Direct buffer shared with the native layer; sized in initRecording() to hold
    // CALLBACK_BUFFER_SIZE_MS (10 ms) of audio.
    private @Nullable
    ByteBuffer byteBuffer;

    // Custom audio source that is fed PCM data externally via addPcmData().
    private @Nullable
    MyAudioRecordSource audioRecord;
    // Thread that pulls data from audioRecord and forwards it to native code.
    private @Nullable
    AudioRecordThread audioThread;
    // Device preferred via setPreferredDevice(); null means default routing.
    private @Nullable
    AudioDeviceInfo preferredDevice;

    // Executor reserved for scheduled background tasks (the configuration-logging task
    // is currently commented out in startRecording()).
    private final ScheduledExecutorService executor;
    private @Nullable
    ScheduledFuture<String> future;

    // Tri-state: null = verification has not run yet; otherwise the result of the
    // last audio-configuration verification (see logRecordingConfigurations()).
    private final AtomicReference<Boolean> audioSourceMatchesRecordingSessionRef =
            new AtomicReference<>();

    // Optional client callbacks; each may be null.
    private final @Nullable
    AudioRecordErrorCallback errorCallback;
    private final @Nullable
    AudioRecordStateCallback stateCallback;
    private final @Nullable
    SamplesReadyCallback audioSamplesReadyCallback;
    private final boolean isAcousticEchoCancelerSupported;
    private final boolean isNoiseSuppressorSupported;


    /**
     * Worker thread that polls the custom audio source for PCM data and forwards it
     * to native code in byteBuffer-sized chunks. Runs at urgent-audio priority.
     * Stop via stopThread(); the loop exits cooperatively on the keepAlive flag.
     */
    private class AudioRecordThread extends Thread {

        private volatile boolean keepAlive = true;

        public AudioRecordThread(String name) {
            super(name);
        }

        @Override
        public void run() {
            Process.setThreadPriority(Process.THREAD_PRIORITY_URGENT_AUDIO);
            Logging.d(TAG, "AudioRecordThread" + WebRtcAudioUtils.getThreadInfo());
            doAudioRecordStateCallback(AUDIO_RECORD_START);
            while (keepAlive) {
                byte[] feedingAudioData = audioRecord.read();
                if (feedingAudioData == null) {
                    // No data available yet; back off briefly before polling again.
                    try {
                        Thread.sleep(10);
                    } catch (InterruptedException e) {
                        // Restore the interrupt status; shutdown is driven by keepAlive,
                        // not by interruption.
                        Thread.currentThread().interrupt();
                    }
                    continue;
                }
                // Forward the fed PCM data to native code one buffer-capacity chunk
                // at a time. Bulk put() avoids the per-chunk array copy that
                // Arrays.copyOfRange() would allocate.
                int offset = 0;
                while (offset < feedingAudioData.length) {
                    int length = Math.min(byteBuffer.capacity(), feedingAudioData.length - offset);
                    byteBuffer.clear();
                    byteBuffer.put(feedingAudioData, offset, length);
                    offset += length;
                    if (keepAlive) {
                        nativeDataIsRecorded(nativeAudioRecord, length);
                    }
                }
            }

            try {
                if (audioRecord != null) {
                    audioRecord.stop();
                    doAudioRecordStateCallback(AUDIO_RECORD_STOP);
                }
            } catch (IllegalStateException e) {
                Logging.e(TAG, "AudioRecord.stop failed: " + e.getMessage());
            }
        }

        public void stopThread() {
            Logging.d(TAG, "stopThread");
            keepAlive = false;
        }
    }

    // Called from native code: constructs the module with the default source/format,
    // a default scheduler, no client callbacks, and whatever HW effect support
    // WebRtcAudioEffects reports.
    @CalledByNative
    WebRtcAudioRecord(Context context, AudioManager audioManager) {
        this(context, newDefaultScheduler() /* scheduler */, audioManager, DEFAULT_AUDIO_SOURCE,
                DEFAULT_AUDIO_FORMAT, null /* errorCallback */, null /* stateCallback */,
                null /* audioSamplesReadyCallback */, WebRtcAudioEffects.isAcousticEchoCancelerSupported(),
                WebRtcAudioEffects.isNoiseSuppressorSupported());
    }

    /**
     * Creates a WebRtcAudioRecord instance.
     *
     * @param context application context, used when dumping audio state on errors.
     * @param scheduler executor reserved for scheduled background tasks.
     * @param audioManager system AudioManager, used for recording-session queries and logging.
     * @param audioSource MediaRecorder.AudioSource value to record from.
     * @param audioFormat AudioFormat encoding constant (PCM 16 bit by default).
     * @param errorCallback optional; notified of init/start/run-time errors.
     * @param stateCallback optional; notified of start/stop state transitions.
     * @param audioSamplesReadyCallback optional; stored but not used by the visible code.
     * @param isAcousticEchoCancelerSupported claimed HW AEC support; validated below.
     * @param isNoiseSuppressorSupported claimed HW NS support; validated below.
     * @throws IllegalArgumentException if HW AEC/NS support is claimed but
     *         WebRtcAudioEffects reports it as unavailable.
     */
    public WebRtcAudioRecord(Context context, ScheduledExecutorService scheduler,
                             AudioManager audioManager, int audioSource, int audioFormat,
                             @Nullable AudioRecordErrorCallback errorCallback,
                             @Nullable AudioRecordStateCallback stateCallback,
                             @Nullable SamplesReadyCallback audioSamplesReadyCallback,
                             boolean isAcousticEchoCancelerSupported, boolean isNoiseSuppressorSupported) {
        if (isAcousticEchoCancelerSupported && !WebRtcAudioEffects.isAcousticEchoCancelerSupported()) {
            throw new IllegalArgumentException("HW AEC not supported");
        }
        if (isNoiseSuppressorSupported && !WebRtcAudioEffects.isNoiseSuppressorSupported()) {
            throw new IllegalArgumentException("HW NS not supported");
        }
        this.context = context;
        this.executor = scheduler;
        this.audioManager = audioManager;
        this.audioSource = audioSource;
        this.audioFormat = audioFormat;
        this.errorCallback = errorCallback;
        this.stateCallback = stateCallback;
        this.audioSamplesReadyCallback = audioSamplesReadyCallback;
        this.isAcousticEchoCancelerSupported = isAcousticEchoCancelerSupported;
        this.isNoiseSuppressorSupported = isNoiseSuppressorSupported;
        Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
    }

    // Stores the native peer handle that is passed back on every native call
    // (nativeCacheDirectBufferAddress, nativeDataIsRecorded).
    @CalledByNative
    public void setNativeAudioRecord(long nativeAudioRecord) {
        this.nativeAudioRecord = nativeAudioRecord;
    }

    // Reports the HW AEC support flag captured at construction time.
    @CalledByNative
    boolean isAcousticEchoCancelerSupported() {
        return isAcousticEchoCancelerSupported;
    }

    // Reports the HW noise-suppressor support flag captured at construction time.
    @CalledByNative
    boolean isNoiseSuppressorSupported() {
        return isNoiseSuppressorSupported;
    }

    // Returns true once the audio configuration has been verified, i.e. after
    // logRecordingConfigurations() ran with verifyAudioConfig enabled and stored a result.
    @CalledByNative
    boolean isAudioConfigVerified() {
        return audioSourceMatchesRecordingSessionRef.get() != null;
    }

    // Returns the result of the last audio-configuration verification, or false
    // (with a warning) if no verification has completed yet.
    @CalledByNative
    boolean isAudioSourceMatchingRecordingSession() {
        final Boolean verified = audioSourceMatchesRecordingSessionRef.get();
        if (verified != null) {
            return verified;
        }
        Logging.w(TAG, "Audio configuration has not yet been verified");
        return false;
    }

    // Built-in (hardware) AEC is not used by this implementation; always reports
    // failure to the native caller.
    @CalledByNative
    private boolean enableBuiltInAEC(boolean enable) {
        return false;
    }

    // Built-in (hardware) noise suppression is not used by this implementation;
    // always reports failure to the native caller.
    @CalledByNative
    private boolean enableBuiltInNS(boolean enable) {
        return false;
    }

    /**
     * Allocates the shared direct buffer, caches it on the native side and creates the
     * custom audio source. Returns the number of frames per 10 ms callback buffer, or
     * -1 on failure (reported through the error callback).
     */
    @CalledByNative
    private int initRecording(int sampleRate, int channels) {
        // One callback buffer holds CALLBACK_BUFFER_SIZE_MS (10 ms) worth of frames.
        final int bytesPerFrame = channels * getBytesPerSample(audioFormat);
        final int framesPerBuffer = sampleRate / BUFFERS_PER_SECOND;
        byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * framesPerBuffer);
        if (!byteBuffer.hasArray()) {
            reportWebRtcAudioRecordInitError("ByteBuffer does not have backing array.");
            return -1;
        }
        Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
        // Hand the buffer to the native layer so recorded samples can be exchanged through it.
        nativeCacheDirectBufferAddress(nativeAudioRecord, byteBuffer);
        final int channelConfig = channelCountToConfiguration(channels);
        final int minBufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
        if (minBufferSize == AudioRecord.ERROR || minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
            reportWebRtcAudioRecordInitError("AudioRecord.getMinBufferSize failed: " + minBufferSize);
            return -1;
        }
        // Over-allocate to guard against glitches under load, but never go below one
        // full callback buffer.
        final int bufferSizeInBytes = Math.max(BUFFER_SIZE_FACTOR * minBufferSize, byteBuffer.capacity());
        audioRecord = createMyAudioRecord(audioSource, sampleRate, channelConfig, audioFormat, bufferSizeInBytes);
        return framesPerBuffer;
    }

    // Factory for the custom audio source that replaces the platform AudioRecord.
    private MyAudioRecordSource createMyAudioRecord(int audioSource, int sampleRate, int channelConfig, int audioFormat, int bufferSizeInBytes) {
        return new MyAudioRecordSource(audioSource, sampleRate, channelConfig, audioFormat, bufferSizeInBytes);
    }

    /**
     * Prefer a specific {@link AudioDeviceInfo} device for recording. Calling after recording starts
     * is valid but may cause a temporary interruption if the audio routing changes.
     */
    @RequiresApi(Build.VERSION_CODES.M)
    @TargetApi(Build.VERSION_CODES.M)
    void setPreferredDevice(@Nullable AudioDeviceInfo preferredDevice) {
        Logging.d(
                TAG, "setPreferredDevice " + (preferredDevice != null ? preferredDevice.getId() : null));
        this.preferredDevice = preferredDevice;
        // Apply immediately when a recording source already exists; otherwise the stored
        // value is used when the source is created.
        if (audioRecord != null && !audioRecord.setPreferredDevice(preferredDevice)) {
            Logging.e(TAG, "setPreferredDevice failed");
        }
    }

    /**
     * Starts the worker thread that feeds PCM data to native code.
     * Returns true on success. A previous session must have been stopped first;
     * assertTrue() throws if a thread is still alive, so the old
     * `if (audioThread != null) return false;` branch was unreachable and has been removed.
     */
    @CalledByNative
    private boolean startRecording() {
        Logging.d(TAG, "startRecording");
        assertTrue(audioThread == null);
        audioThread = new AudioRecordThread("AudioRecordJavaThread");
        audioThread.start();
        return true;
    }

    /**
     * Stops the worker thread, cancels any pending scheduled task and releases the
     * audio source. Returns true even if the thread join times out (the timeout is
     * only logged together with the current audio state).
     */
    @CalledByNative
    private boolean stopRecording() {
        Logging.d(TAG, "stopRecording");
        assertTrue(audioThread != null);
        final ScheduledFuture<String> pendingTask = future;
        future = null;
        if (pendingTask != null && !pendingTask.isDone()) {
            // Might be needed if the client calls startRecording(), stopRecording() back-to-back.
            pendingTask.cancel(true /* mayInterruptIfRunning */);
        }
        audioThread.stopThread();
        if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
            Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
            WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
        }
        audioThread = null;
        releaseAudioResources();
        return true;
    }

    @TargetApi(Build.VERSION_CODES.N)
    // Checks the number of active recording sessions and logs the states of all active sessions.
    // Returns the number of active sessions. Note that this could occur on an arbitrary thread.
    // When |verifyAudioConfig| is set, also stores in audioSourceMatchesRecordingSessionRef
    // whether our AudioRecord's source/session/format/device matches one of the active sessions.
    private int logRecordingConfigurations(AudioRecord audioRecord, boolean verifyAudioConfig) {
        // Runtime guard: AudioManager#getActiveRecordingConfigurations() requires API 24 (N).
        if (Build.VERSION.SDK_INT < Build.VERSION_CODES.N) {
            Logging.w(TAG, "AudioManager#getActiveRecordingConfigurations() requires N or higher");
            return 0;
        }
        if (audioRecord == null) {
            return 0;
        }

        // Get a list of the currently active audio recording configurations of the device (can be more
        // than one). An empty list indicates there is no recording active when queried.
        List<AudioRecordingConfiguration> configs = audioManager.getActiveRecordingConfigurations();
        final int numActiveRecordingSessions = configs.size();
        Logging.d(TAG, "Number of active recording sessions: " + numActiveRecordingSessions);
        if (numActiveRecordingSessions > 0) {
            logActiveRecordingConfigs(audioRecord.getAudioSessionId(), configs);
            if (verifyAudioConfig) {
                // Run an extra check to verify that the existing audio source doing the recording (tied
                // to the AudioRecord instance) is matching what the audio recording configuration lists
                // as its client parameters. If these do not match, recording might work but under invalid
                // conditions.
                audioSourceMatchesRecordingSessionRef.set(
                        verifyAudioConfig(audioRecord.getAudioSource(), audioRecord.getAudioSessionId(),
                                audioRecord.getFormat(), audioRecord.getRoutedDevice(), configs));
            }
        }
        return numActiveRecordingSessions;
    }

    // Helper method which throws an AssertionError when the supplied condition is false.
    private static void assertTrue(boolean condition) {
        if (condition) {
            return;
        }
        throw new AssertionError("Expected condition to be true");
    }

    // Maps a channel count to the corresponding AudioFormat input channel configuration.
    // Only mono (1) is special-cased; every other count is treated as stereo.
    private int channelCountToConfiguration(int channels) {
        return (channels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
    }

    // Hands |byteBuffer| to the native layer so recorded samples can be exchanged
    // through it without per-callback copies.
    private native void nativeCacheDirectBufferAddress(long nativeAudioRecordJni, ByteBuffer byteBuffer);

    // Notifies native code that |bytes| of recorded data are available in the cached buffer.
    private native void nativeDataIsRecorded(long nativeAudioRecordJni, int bytes);

    // NOTE(review): upstream WebRTC zeroes all recorded samples when |mute| is true;
    // this implementation only logs the request — the microphone is NOT actually muted.
    public void setMicrophoneMute(boolean mute) {
        Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
    }

    /** Releases the custom audio source and resets the session-verification state. */
    private void releaseAudioResources() {
        Logging.d(TAG, "releaseAudioResources");
        final MyAudioRecordSource record = audioRecord;
        audioRecord = null;
        if (record != null) {
            record.release();
        }
        audioSourceMatchesRecordingSessionRef.set(null);
    }

    // Logs an initialization error, dumps the current audio state and forwards the
    // message to the client's AudioRecordErrorCallback (if one was registered).
    private void reportWebRtcAudioRecordInitError(String errorMessage) {
        Logging.e(TAG, "Init recording error: " + errorMessage);
        WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
        if (errorCallback != null) {
            errorCallback.onWebRtcAudioRecordInitError(errorMessage);
        }
    }

    // Logs a start error together with its error code, dumps the current audio state
    // and forwards both to the client's AudioRecordErrorCallback (if registered).
    private void reportWebRtcAudioRecordStartError(
            AudioRecordStartErrorCode errorCode, String errorMessage) {
        Logging.e(TAG, "Start recording error: " + errorCode + ". " + errorMessage);
        WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
//    logRecordingConfigurations(audioRecord, false /* verifyAudioConfig */);
        if (errorCallback != null) {
            errorCallback.onWebRtcAudioRecordStartError(errorCode, errorMessage);
        }
    }

    // Logs a run-time recording error, dumps the current audio state and forwards the
    // message to the client's AudioRecordErrorCallback (if one was registered).
    private void reportWebRtcAudioRecordError(String errorMessage) {
        Logging.e(TAG, "Run-time recording error: " + errorMessage);
        WebRtcAudioUtils.logAudioState(TAG, context, audioManager);
        if (errorCallback != null) {
            errorCallback.onWebRtcAudioRecordError(errorMessage);
        }
    }

    // Forwards a start/stop state transition to the registered state callback, if any.
    // Always logs the transition, even without a callback.
    private void doAudioRecordStateCallback(int audioState) {
        Logging.d(TAG, "doAudioRecordStateCallback: " + audioStateToString(audioState));
        if (stateCallback == null) {
            return;
        }
        switch (audioState) {
            case AUDIO_RECORD_START:
                stateCallback.onWebRtcAudioRecordStart();
                break;
            case AUDIO_RECORD_STOP:
                stateCallback.onWebRtcAudioRecordStop();
                break;
            default:
                Logging.e(TAG, "Invalid audio state");
                break;
        }
    }

    // Reference from Android code, AudioFormat.getBytesPerSample. BitPerSample / 8
    // Returns the size in bytes of a single sample for the given encoding; throws
    // IllegalArgumentException for ENCODING_INVALID or any unknown encoding.
    private static int getBytesPerSample(int audioFormat) {
        if (audioFormat == AudioFormat.ENCODING_PCM_8BIT) {
            return 1;
        }
        if (audioFormat == AudioFormat.ENCODING_PCM_16BIT
                || audioFormat == AudioFormat.ENCODING_IEC61937
                || audioFormat == AudioFormat.ENCODING_DEFAULT) {
            return 2;
        }
        if (audioFormat == AudioFormat.ENCODING_PCM_FLOAT) {
            return 4;
        }
        throw new IllegalArgumentException("Bad audio format " + audioFormat);
    }

    @TargetApi(Build.VERSION_CODES.N)
    // Logs the client audio source/session, the client and device audio formats, and the
    // input device for every active recording configuration. |session| is our own
    // AudioRecord session id, printed next to each client session id for comparison.
    // Always returns true; asserts that |configs| is non-empty.
    private static boolean logActiveRecordingConfigs(
            int session, List<AudioRecordingConfiguration> configs) {
        assertTrue(!configs.isEmpty());
        final Iterator<AudioRecordingConfiguration> it = configs.iterator();
        Logging.d(TAG, "AudioRecordingConfigurations: ");
        while (it.hasNext()) {
            final AudioRecordingConfiguration config = it.next();
            StringBuilder conf = new StringBuilder();
            // The audio source selected by the client.
            final int audioSource = config.getClientAudioSource();
            conf.append("  client audio source=")
                    .append(WebRtcAudioUtils.audioSourceToString(audioSource))
                    .append(", client session id=")
                    .append(config.getClientAudioSessionId())
                    // Compare with our own id (based on AudioRecord#getAudioSessionId()).
                    .append(" (")
                    .append(session)
                    .append(")")
                    .append("\n");
            // Audio format at which audio is recorded on this Android device.
            AudioFormat format = config.getFormat();
            conf.append("  Device AudioFormat: ")
                    .append("channel count=")
                    .append(format.getChannelCount())
                    .append(", channel index mask=")
                    .append(format.getChannelIndexMask())
                    // Only AudioFormat#CHANNEL_IN_MONO is guaranteed to work on all devices.
                    .append(", channel mask=")
                    .append(WebRtcAudioUtils.channelMaskToString(format.getChannelMask()))
                    .append(", encoding=")
                    .append(WebRtcAudioUtils.audioEncodingToString(format.getEncoding()))
                    .append(", sample rate=")
                    .append(format.getSampleRate())
                    .append("\n");
            // Audio format at which the client application is recording audio.
            format = config.getClientFormat();
            conf.append("  Client AudioFormat: ")
                    .append("channel count=")
                    .append(format.getChannelCount())
                    .append(", channel index mask=")
                    .append(format.getChannelIndexMask())
                    // Only AudioFormat#CHANNEL_IN_MONO is guaranteed to work on all devices.
                    .append(", channel mask=")
                    .append(WebRtcAudioUtils.channelMaskToString(format.getChannelMask()))
                    .append(", encoding=")
                    .append(WebRtcAudioUtils.audioEncodingToString(format.getEncoding()))
                    .append(", sample rate=")
                    .append(format.getSampleRate())
                    .append("\n");
            // Audio input device used for this recording session.
            final AudioDeviceInfo device = config.getAudioDevice();
            if (device != null) {
                assertTrue(device.isSource());
                conf.append("  AudioDevice: ")
                        .append("type=")
                        .append(WebRtcAudioUtils.deviceTypeToString(device.getType()))
                        .append(", id=")
                        .append(device.getId());
            }
            Logging.d(TAG, conf.toString());
        }
        return true;
    }

    // Verify that the client audio configuration (device and format) matches the requested
    // configuration (same as AudioRecord's). Returns true if any active recording session
    // passes all checks; logs PASS/FAILED accordingly.
    @TargetApi(Build.VERSION_CODES.N)
    private static boolean verifyAudioConfig(int source, int session, AudioFormat format,
                                             AudioDeviceInfo device, List<AudioRecordingConfiguration> configs) {
        assertTrue(!configs.isEmpty());
        for (AudioRecordingConfiguration config : configs) {
            final AudioDeviceInfo configDevice = config.getAudioDevice();
            if (configDevice == null) {
                continue;
            }
            // The session must be ours: same client audio source and session id.
            if (config.getClientAudioSource() != source
                    || config.getClientAudioSessionId() != session) {
                continue;
            }
            // The client format must match the format of the AudioRecord instance.
            final AudioFormat clientFormat = config.getClientFormat();
            if (clientFormat.getEncoding() != format.getEncoding()
                    || clientFormat.getSampleRate() != format.getSampleRate()
                    || clientFormat.getChannelMask() != format.getChannelMask()
                    || clientFormat.getChannelIndexMask() != format.getChannelIndexMask()) {
                continue;
            }
            // The device-side format must be properly configured.
            final AudioFormat deviceFormat = config.getFormat();
            if (deviceFormat.getEncoding() == AudioFormat.ENCODING_INVALID
                    || deviceFormat.getSampleRate() <= 0) {
                continue;
            }
            // For the channel mask, either the position- or index-based value must be valid.
            if (deviceFormat.getChannelMask() == AudioFormat.CHANNEL_INVALID
                    && deviceFormat.getChannelIndexMask() == AudioFormat.CHANNEL_INVALID) {
                continue;
            }
            if (!checkDeviceMatch(configDevice, device)) {
                continue;
            }
            Logging.d(TAG, "verifyAudioConfig: PASS");
            return true;
        }
        Logging.e(TAG, "verifyAudioConfig: FAILED");
        return false;
    }

    @TargetApi(Build.VERSION_CODES.N)
    // Returns true if device A parameters matches those of device B.
    // TODO(henrika): can be improved by adding AudioDeviceInfo#getAddress() but it requires API 29.
    private static boolean checkDeviceMatch(AudioDeviceInfo devA, AudioDeviceInfo devB) {
        final boolean sameId = devA.getId() == devB.getId();
        final boolean sameType = devA.getType() == devB.getType();
        return sameId && sameType;
    }

    // Maps an internal audio state constant to a human-readable label for logging.
    private static String audioStateToString(int state) {
        if (state == AUDIO_RECORD_START) {
            return "START";
        }
        if (state == AUDIO_RECORD_STOP) {
            return "STOP";
        }
        return "INVALID";
    }

    // Per-process counter giving each scheduler created below a unique name prefix.
    private static final AtomicInteger nextSchedulerId = new AtomicInteger(0);

    /**
     * Creates a scheduler for background tasks. Worker threads are named
     * "WebRtcAudioRecordScheduler-&lt;schedulerId&gt;-&lt;threadId&gt;".
     */
    static ScheduledExecutorService newDefaultScheduler() {
        final AtomicInteger nextThreadId = new AtomicInteger(0);
        final ThreadFactory factory = new ThreadFactory() {
            @Override
            public Thread newThread(Runnable r) {
                final Thread thread = Executors.defaultThreadFactory().newThread(r);
                final String name = String.format("WebRtcAudioRecordScheduler-%s-%s",
                        nextSchedulerId.getAndIncrement(), nextThreadId.getAndIncrement());
                thread.setName(name);
                return thread;
            }
        };
        return Executors.newScheduledThreadPool(0, factory);
    }


    /**
     * Feeds externally captured PCM data into the custom audio source; silently dropped
     * when no source exists (i.e. before initRecording() or after release).
     */
    public void addPcmData(byte[] pcm) {
        // Snapshot the field so a concurrent releaseAudioResources() cannot null it out
        // between the check and the call. NOTE(review): audioRecord is not volatile, so
        // cross-thread visibility is still best-effort — confirm the threading model.
        final MyAudioRecordSource source = audioRecord;
        if (source != null) {
            source.addData(pcm);
        }
    }
}
