import Browser from "../browser/browser";
import {compress,encodePCM}  from '../transform/transform'

// AudioHandler构造函数参数格式
// Configuration accepted by the AudioHandler constructor.
interface AudioHandlerConfig {
    outSampleBits: number,          // output sample bit depth, 8 or 16 (used when compressing audio)
    outSampleRate: number,          // output sample rate (used when compressing audio)
    numberOfInputChannels: number,  // integer channel count of the input node; Web Audio default is 2, max 32 (this handler only accepts 1)
    numberOfOutputChannels: number, // integer channel count of the output node; Web Audio default is 2, max 32 (this handler only accepts 1)
    analyerFftSize: number,         // analyser scratch buffer size (one of 0, 1024, 2048, 4096, 8192)
    // Real-time packaging settings. FIX: declared optional — the constructor
    // already handles `package == undefined` and falls back to zeroed values.
    package?: {
        chunkCount: number,         // chunks per package; e.g. a crate of 20 apples — 20 is the chunkCount
        chunkSize: number,          // samples per chunk: 0, 1024, 2048, 4096 or 8192 (onaudioprocess fires once this many samples are collected)
    }
}

//音频内部数据
// Shared recording state: the counters, PCM buffers and encoded results that
// accumulate while recording. Subclasses read and write these fields directly.
class AudioInfo {
    // --- output configuration ---
    protected outSampleRate = 0;        // target sample rate of exported audio
    protected outSampleBits = 0;        // target sample bit depth of exported audio
    protected littleEdian = false;      // whether the client is little-endian

    // --- raw PCM collectors ---
    protected lBuffer: Float32Array[] = [];              // collected PCM chunks, left channel
    protected rBuffer: Float32Array[] = [];              // collected PCM chunks, right channel
    protected leftRealTimeBuffer: Float32Array[] = [];   // chunked real-time data, left channel
    protected rightRealTimeBuffer: Float32Array[] = [];  // chunked real-time data, right channel

    // --- counters ---
    protected realTimeTotalSize = 0;           // samples accumulated toward the current real-time package
    protected totalSize = 0;                   // total samples recorded since start
    protected totalDuration = 0;               // total recording duration in seconds
    protected fileByteTotalSize = 0;           // total recording size in bytes (derived from totalSize)
    protected fileByteTotalRealTimeSize = 0;   // current real-time package size in bytes

    // --- volume ---
    protected maxVol = 0;   // peak volume observed so far
    protected vol = 0;      // current volume

    // --- encoded results ---
    protected realTimeAudioData: DataView = new DataView(new ArrayBuffer(0)); // latest encoded real-time package
    protected totalData: DataView = new DataView(new ArrayBuffer(0));         // encoded data for the whole recording
}

// Public recording operations implemented by AudioHandler.
interface AudioFunction{
    // Start recording; resolves once the microphone pipeline is connected.
    startRecord():Promise<any>;
    // Stop recording.
    stopRecord():void;
    // Pause recording.
    pause():void;
    // Resume a paused recording.
    resume():void;
    // Play back the entire recording.
    playTotalRecord():void;
    // Play back the first n seconds of the recording (per original comment "前 n 秒" — TODO confirm exact semantics).
    playNRecord(seconds:number):void;
    // Export the whole recording in the given format (e.g. mp3, wav, pcm).
    exportTotalRecord(format:string):void;
    // Export the first n seconds of the recording in the given format (TODO confirm exact semantics).
    exportNRecord(format:string,seconds:number):void;
    // Destroy this instance, including the underlying media stream.
    destroyAudioHandlerInstance():void;
    // Flatten the per-channel 2D real-time chunk buffers into 1D arrays.
    realTime2DconvertTo1D(leftBuffer:Array<Float32Array>,rightBuffer:Array<Float32Array>):{left:Float32Array,right:Float32Array};
}

export  class AudioHandler extends AudioInfo implements AudioFunction{//
   private audioContext: AudioContext|any;             //音频处理上下文，感叹号强调声明不为空，避免爆红。
   private audioHandlerConfig!:AudioHandlerConfig;  //音频处理需要的参数  
   private analyer!:AnalyserNode;                   //音频解析器
   private audioStream!:MediaStream;                //音频流对象
   private recorder!:ScriptProcessorNode;                            //音频数据采集处理器        
   private audioInput!:MediaStreamAudioSourceNode;  //音频流对象转为可操作的音频输入对象节点
   private needRecord: boolean = true;              // 由于safari问题，导致使用该方案代替disconnect/connect方案
   private biquadFilter!:BiquadFilterNode;          //双阶滤波器。作用：暂时不清晰
   private gainNode!:GainNode;                      //增益节点。作用：目前只知道可以调节音量（声音大小），其他作用暂时不清晰

   //回调接口（传入的数据是音频数据，有实时的数据和不是实时的数据）
    public onProgress!:(audioInfo:AudioInfo)=>void;


   /**
    * 构造默认数据
    * @param audioHandlerConfig 
    */
   constructor(audioHandlerConfig:AudioHandlerConfig){
       super();

       //设置 getUserMedia 版本兼容
       Browser.initUserMedia();

       //判断当前的客户端 端字节序
       this.littleEdian=Browser.isLittleEdian();
       
       //设置处理音频数据的默认配置
       let chunkSize = audioHandlerConfig.package == undefined ? 0:audioHandlerConfig.package.chunkSize;
       this.audioHandlerConfig = {
        //解析器的临时存储空间大小
        analyerFftSize: ~[0,1024,2048,4096,8192].indexOf(audioHandlerConfig.analyerFftSize)?chunkSize:0,
        // 输出采样数位 8, 16
        outSampleBits: ~[8, 16].indexOf(audioHandlerConfig.outSampleBits) ? audioHandlerConfig.outSampleBits : 16,
        // 输出采样率
        outSampleRate: ~[8000, 11025, 16000, 22050, 24000, 44100, 48000].indexOf(audioHandlerConfig.outSampleRate) ? audioHandlerConfig.outSampleRate : 0,
        // 声道数，1或2
        numberOfInputChannels: ~[1].indexOf(audioHandlerConfig.numberOfInputChannels) ? audioHandlerConfig.numberOfInputChannels : 1,
        numberOfOutputChannels: ~[1].indexOf(audioHandlerConfig.numberOfOutputChannels) ? audioHandlerConfig.numberOfOutputChannels : 1,
        //实时数据 分包配置
        package:{
            chunkCount: audioHandlerConfig.package == undefined ? 0:audioHandlerConfig.package.chunkCount,
            chunkSize:  ~ [0,1024,2048,4096,8192].indexOf(chunkSize)?chunkSize:0,
        }
    };
   }


   
   realTime2DconvertTo1D(leftBuffer: Float32Array[], rightBuffer: Float32Array[]): { left: Float32Array; right: Float32Array; } {
        let lData = new Float32Array(0) ,rData = new Float32Array(0), offset = 0;//offset一维的偏移量
        if(this.audioHandlerConfig.numberOfOutputChannels&&this.audioHandlerConfig.numberOfOutputChannels == 1){
            lData = new Float32Array(this.realTimeTotalSize / 2);
            for (let index = 0; index < this.leftRealTimeBuffer.length; index++) {
                let tmpData = this.leftRealTimeBuffer[index];
                lData.set(tmpData,offset);
                offset +=  tmpData.length;
            }
        }
        offset = 0;
        if(this.audioHandlerConfig.numberOfOutputChannels&&this.audioHandlerConfig.numberOfOutputChannels > 1){
            rData = new Float32Array(this.realTimeTotalSize / 2);
            for (let index = 0; index < this.rightRealTimeBuffer.length; index++) {
                let tmpData = this.rightRealTimeBuffer[index];
                rData.set(tmpData,offset);
                offset +=  tmpData.length;
            }
        }
        return {left:lData,right:rData};
    }

    startRecord(): Promise<any> {
        //检查当前是否已经开启了录音，如果是，则提示录音已经开启
        if(this.audioContext){
            return Promise.reject(new Error('录音已经被开启，不需要重复开启'));
        }
        /*初始化录音*/
        //实例化音频上下文
        this.audioContext = new (window.AudioContext || (window as any).webkitAudioContext)();
        //实例化音频解析器(音频数据过程处理器)
        this.analyer = this.audioContext.createAnalyser();
        //实例化二阶滤波器(音频数据过程处理器)
        this.biquadFilter = this.audioContext.createBiquadFilter();
        //实例化增益节点(音频数据过程处理器)
        this.gainNode = this.audioContext.createGain();
        //创建一个对音频数据进行采样的处理器
        let createScript = this.audioContext.createScriptProcessor || (this.audioContext as any).createJavaScriptNode
        //[param1,param2,param3]。
        //第一个参数表示音频数据采集大小。只有当前音频数据大小到达这个参数大小，才会触发回调方法 onaudioprocess
        //第二第三参数分别表示输入的声道数和输出的声道数
        this.recorder = createScript.apply(this.audioContext,[this.audioHandlerConfig.package.chunkSize
                                                            ,this.audioHandlerConfig.numberOfInputChannels,this.audioHandlerConfig.numberOfOutputChannels]);
        this.recorder.onaudioprocess = (e:AudioProcessingEvent)=>{
            let leftData:any = null,rightData:any = null,maxVol = 0 ,vol=0;
            //只有一个输出声道
            if(this.audioHandlerConfig.numberOfOutputChannels&&this.audioHandlerConfig.numberOfOutputChannels==1){
                //左声道数据
                leftData = e.inputBuffer.getChannelData(0);
                //累计左声道数据(pcm格式)
                this.lBuffer.push(new Float32Array(leftData));
                //实时性的数据（pcm格式）
                this.leftRealTimeBuffer = new Array(new Float32Array(leftData));
                //总音频大小
                this.totalSize += leftData.length;
                this.realTimeTotalSize += leftData.length;
            }
            //存在多个输出声道
            if (this.audioHandlerConfig.numberOfOutputChannels&&this.audioHandlerConfig.numberOfOutputChannels >= 2) {
                 //右声道数据
                 rightData = e.inputBuffer.getChannelData(1);
                 //累计右声道数据(pcm格式)
                 this.rBuffer.push(new Float32Array(rightData));
                 //实时性的数据（pcm格式）
                 this.rightRealTimeBuffer = new Array(new Float32Array(rightData));
                 //总音频大小
                 this.totalSize += rightData.length;
                 this.realTimeTotalSize += rightData.length;
            }

            //计算总字节录音大小
            this.fileByteTotalSize = Math.floor(this.totalSize/Math.max(this.audioContext.sampleRate/this.audioHandlerConfig.outSampleRate,1))
                                    *(this.audioHandlerConfig.outSampleBits/8);
            //计算实时数据总数据大小
            this.fileByteTotalSize = Math.floor(this.realTimeTotalSize/Math.max(this.audioContext.sampleRate/this.audioHandlerConfig.outSampleRate,1))
                                    *(this.audioHandlerConfig.outSampleBits/8);
            
            //计算音频百分比
            if(leftData){
                this.vol = Math.max(Math.max.apply(Math,leftData)*100,this.vol);
            }
            if(rightData){
                this.vol = Math.max(Math.max.apply(Math,rightData)*100,this.vol);
            }

            //计算录音总时长
            this.totalDuration += this.audioHandlerConfig.package.chunkSize / this.audioContext.sampleRate;

            //实时数据总大小 等于分包数据预期大小 就设置回调接口的入参
            let expectedRealTimeTotalSize = this.audioHandlerConfig.package.chunkCount*this.audioHandlerConfig.package.chunkSize;
            if(this.realTimeTotalSize == expectedRealTimeTotalSize){
                let realTimeData = this.realTime2DconvertTo1D(this.leftRealTimeBuffer,this.rightRealTimeBuffer);
                let realTimeCompressData = compress(realTimeData,this.audioContext.sampleRate,this.audioHandlerConfig.outSampleRate);
                 // 按采样位数重新编码
                 this.realTimeAudioData =  encodePCM(realTimeCompressData, this.audioHandlerConfig.outSampleBits, this.littleEdian);
                 //设置回调函数的入参数据
                 this.onProgress(this);
                 //清空实时数据大小记录值
                 this.realTimeTotalSize = 0;
            }
        }
        
        //开启麦克风
        const constraints = {
            audio: {
              echoCancellation: false,
              autoGainControl: false,
              noiseCancellation: false
            },
            video: false
        }
        const userMedia = navigator.mediaDevices.getUserMedia(constraints)
        .then((stream:MediaStream) => {
            // audioInput表示音频源节点 将stream转换为音频输入源节点
            this.audioInput = this.audioContext.createMediaStreamSource(stream);
            this.audioStream = stream;
           
        },// 报错丢给外部使用者catch，后期可在此处增加建议性提示
            error => {
            // 抛出异常
            new Error(error.name + " : " + error.message)
        } ).then(() => {
            //音频节点的连接
            this.audioInput.connect(this.analyer);
            this.analyer.connect(this.gainNode);
            this.gainNode.connect(this.biquadFilter);
            this.biquadFilter.connect(this.recorder);
            this.recorder.connect(this.audioContext.destination);
        });
        return userMedia;
    }

    stopRecord(): void {
        this.audioInput&&this.audioInput.disconnect();
        this.analyer&&this.analyer.disconnect();
        this.recorder&&this.recorder.disconnect();
        this.gainNode&&this.gainNode.disconnect();
        this.biquadFilter&&this.biquadFilter.disconnect();
        this.audioContext = undefined;
    }
    pause(): void {
        this.audioContext&&this.audioContext.suspend();
    }
    resume(): void {
        this.audioContext&&this.audioContext.resume();
    }
    playTotalRecord(): void {
        throw new Error("Method not implemented.");
    }
    playNRecord(seconds: number): void {
        throw new Error("Method not implemented.");
    }
    exportTotalRecord(format: string): void {
        throw new Error("Method not implemented.");
    }
    exportNRecord(format: string, seconds: number): void {
        throw new Error("Method not implemented.");
    }
    destroyAudioHandlerInstance(): void {
        throw new Error("Method not implemented.");
    }

}
