package com.qttaudio.example.utils;

import com.qttaudio.sdk.channel.AudioDataObserver;
import com.qttaudio.sdk.channel.ChannelFactory;

import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.text.SimpleDateFormat;
import java.util.Date;

/**
 * Demonstrates using AudioEngine setRecordDataObserver / setPlaybackDataObserver
 * to obtain the raw mic or speaker audio data. There are two typical uses:
 * 1. Saving the data, or other extensions built on top of it
 * 2. Modifying the raw data in place, mainly for voice-changing scenarios
 *
 * This class mainly demonstrates use 1, saving the data as WAV files; for use 2,
 * the commented examples show how to modify the raw mic data.
 *
 * The core pattern is to implement DataObserver and override the onData
 * callbacks to read — or rewrite — the audio data.
 * */
public class AudioDataObserverImpl implements AudioDataObserver {
    /** Sample rate (Hz) the engine delivers PCM at; written into the WAV headers. */
    public static int engine_samplerate = 16000;

    private boolean recordNeedObserve = false;
    private boolean playbackNeedObserve = false;

    private String micFileName;
    private RandomAccessFile micRaf;
    private int micWriteBytes = 0;

    private String speakerFileName;
    private RandomAccessFile speakerRaf;
    private int speakerWriteBytes = 0;

    // Dedicated lock object. Synchronizing on the mutable Boolean field itself is
    // broken: every reassignment makes threads lock different objects, and the
    // autoboxed Boolean.TRUE/FALSE instances are shared JVM-wide.
    private final Object recordLock = new Object();
    private boolean isRecording = false;

    /** Size in bytes of a canonical PCM WAV header. */
    private static final int wavHeaderLength = 44;

    private final static byte[] RIFF_array = { 0x52, 0x49, 0x46, 0x46 };
    private final static byte[] WAVE_array = { 0x57, 0x41, 0x56, 0x45 };
    private final static byte[] fmt_array = { 0x66, 0x6D, 0x74, 0x20 };
    private final static byte[] data_array = { 0x64, 0x61, 0x74, 0x61 };

    /**
     * Starts dumping raw audio streams to WAV files under {@code path}.
     *
     * @param record   capture the mic (record) stream
     * @param playback capture the speaker (playback) stream
     * @param path     directory the .wav files are created in (created if missing)
     * @return 0 on success (or if already recording), -1 if a file could not be opened
     */
    public int startRecord(boolean record, boolean playback, String path) {
        synchronized (recordLock) {
            if (isRecording)
                return 0;
            mkDirectory(path);
            // Timestamp embedded in the output file names, e.g. "240131-235959".
            String time = new SimpleDateFormat("yyMMdd-HHmmss").format(new Date());
            recordNeedObserve = record;
            playbackNeedObserve = playback;
            // Reset the byte counters; without this a second recording session
            // inherits the previous session's counts and writes wrong headers.
            micWriteBytes = 0;
            speakerWriteBytes = 0;
            try {
                if (recordNeedObserve) {
                    micFileName = path + "/" + time + "-mic.wav";
                    micRaf = new RandomAccessFile(micFileName, "rw");
                    micRaf.setLength(0);           // "rw" does not truncate — drop stale content
                    micRaf.seek(wavHeaderLength);  // header is back-filled in stopRecord()
                }

                if (playbackNeedObserve) {
                    speakerFileName = path + "/" + time + "-speaker.wav";
                    speakerRaf = new RandomAccessFile(speakerFileName, "rw");
                    speakerRaf.setLength(0);
                    speakerRaf.seek(wavHeaderLength);
                }

                ChannelFactory.GetChannelInstance().setAudioDataObserver(this, recordNeedObserve, playbackNeedObserve);
                isRecording = true;
            } catch (IOException e) { // FileNotFoundException is an IOException
                e.printStackTrace();
                // Don't leak a half-opened session (e.g. mic file opened, speaker failed).
                closeQuietly(micRaf);
                closeQuietly(speakerRaf);
                micRaf = null;
                speakerRaf = null;
                return -1;
            }
            return 0;
        }
    }

    /**
     * Creates the directory {@code path} (including missing parents).
     *
     * @param path directory to create
     * @return true only when the directory was newly created; false when it already
     *         existed or creation failed (contract kept for backward compatibility)
     */
    public static boolean mkDirectory(String path) {
        try {
            File dir = new File(path);
            return !dir.exists() && dir.mkdirs();
        } catch (Exception e) {
            return false;
        }
    }

    /** Stops recording, detaches the observer and back-fills both WAV headers. */
    public void stopRecord() {
        synchronized (recordLock) {
            if (!isRecording)
                return;
            isRecording = false;

            ChannelFactory.GetChannelInstance().setAudioDataObserver(null, false, false);

            if (recordNeedObserve && micRaf != null) {
                writeHeader(micRaf, micWriteBytes);
                micRaf = null;
            }
            if (playbackNeedObserve && speakerRaf != null) {
                writeHeader(speakerRaf, speakerWriteBytes);
                speakerRaf = null;
            }
        }
    }

    /**
     * Rewrites the canonical 44-byte PCM WAV header at the start of {@code raf}
     * (mono, 16-bit, {@link #engine_samplerate} Hz) and closes the file.
     *
     * @param raf        file whose PCM payload starts at offset 44
     * @param writeBytes number of PCM payload bytes that follow the header
     */
    private void writeHeader(RandomAccessFile raf, int writeBytes) {
        final int bytesPerFrame = 2; // 1 channel * 16 bits / 8
        try {
            raf.seek(0);
            raf.write(RIFF_array);
            // RIFF chunk size = total file size - 8 ("RIFF" tag + this field),
            // i.e. payload + 36. (The original wrote payload + 44 — 8 bytes too many.)
            raf.write(intToByteArray(writeBytes + wavHeaderLength - 8));
            raf.write(WAVE_array);

            // "fmt " sub-chunk: uncompressed PCM, mono, 16-bit.
            raf.write(fmt_array);
            raf.write(intToByteArray(16));                                // fmt chunk size
            raf.write(shortToByteArray(1));                               // audio format: PCM
            raf.write(shortToByteArray(1));                               // channels: mono
            raf.write(intToByteArray(engine_samplerate));                 // sample rate
            raf.write(intToByteArray(engine_samplerate * bytesPerFrame)); // byte rate
            raf.write(shortToByteArray(bytesPerFrame));                   // block align
            raf.write(shortToByteArray(16));                              // bits per sample

            // "data" sub-chunk header.
            raf.write(data_array);
            raf.write(intToByteArray(writeBytes));
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            // Close in finally — the original leaked the handle when a write failed.
            closeQuietly(raf);
        }
    }

    /** Closes {@code raf}, tolerating null and swallowing close failures (best effort). */
    private static void closeQuietly(RandomAccessFile raf) {
        if (raf != null) {
            try {
                raf.close();
            } catch (IOException ignored) {
                // nothing useful can be done if close itself fails
            }
        }
    }

    /**
     * Copies bytes [0, len) out of {@code buffer} and appends them to {@code raf}.
     *
     * @return the number of bytes actually appended (0 when the write failed)
     */
    private static int appendPcm(RandomAccessFile raf, ByteBuffer buffer, int len) {
        byte[] buf = new byte[len];
        buffer.position(0);
        buffer.get(buf);
        try {
            raf.write(buf);
            return len;
        } catch (IOException e) {
            e.printStackTrace();
            return 0;
        }
    }

    /**
     * Little-endian encoding of a 32-bit int, as required by the WAV format.
     */
    private byte[] intToByteArray(int value) {
        return new byte[] { (byte) value, (byte) (value >>> 8),
                (byte) (value >>> 16), (byte) (value >>> 24)};
    }

    /**
     * Little-endian encoding of the low 16 bits of {@code value}.
     */
    private byte[] shortToByteArray(int value) {
        return new byte[] { (byte) value, (byte) (value >>> 8) };
    }

    @Override
    public boolean onRecordData(ByteBuffer buffer, int len) {
        // 1. Grab the raw mic data and save it.
        synchronized (recordLock) {
            if (isRecording && micRaf != null) {
                micWriteBytes += appendPcm(micRaf, buffer, len);
            }
        }
        // 2. The raw mic data (already denoised / echo-cancelled) could also be fed
        //    to a speech-recognition service such as Baidu or iFlytek.

        // 3. To modify the captured raw data (e.g. for voice changing):
        // 3.1 read bytes [0, len) out of the ByteBuffer, as shown above:
        //byte[] buf = new byte[len];
        //buffer.position(0);
        //buffer.get(buf);
        // 3.2 apply the user's modification:
        //modify(buf);
        // 3.3 write the modified bytes back into ByteBuffer [0, len):
        //buffer.position(0);
        //buffer.put(buf);
        return false;
    }

    @Override
    public boolean onPlaybackData(ByteBuffer buffer, int len) {
        // 1. Grab the raw speaker data and save it.
        synchronized (recordLock) {
            if (isRecording && speakerRaf != null) {
                speakerWriteBytes += appendPcm(speakerRaf, buffer, len);
            }
        }
        // 2. The raw data could also be fed to a speech-recognition service
        //    such as Baidu or iFlytek.

        // 3. To modify the captured raw data (e.g. for voice changing):
        // 3.1 read bytes [0, len) out of the ByteBuffer, as shown above:
        //byte[] buf = new byte[len];
        //buffer.position(0);
        //buffer.get(buf);
        // 3.2 apply the user's modification:
        //modify(buf);
        // 3.3 write the modified bytes back into ByteBuffer [0, len):
        //buffer.position(0);
        //buffer.put(buf);
        return false;
    }
}
