import common from '@ohos.app.ability.common';
import audio from '@ohos.multimedia.audio';
import { BusinessError } from '@ohos.base';
import fs from '@ohos.file.fs';
import { CommonInterface, Options } from './InterfaceType';
import { countDownLatch } from './index';
import { connectWebSocket } from './connectWebSocket';
import media from '@ohos.multimedia.media';
import { speechRecognizer } from '@kit.CoreSpeechKit';
import promptAction from '@ohos.promptAction';

let iatWS; // WebSocket handle for streaming recognition (assignment is currently commented out in startMeeting)
// Capture initialisation parameters
let audioStreamInfo: audio.AudioStreamInfo = {
  samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_16000, // sampling rate
  channels: audio.AudioChannel.CHANNEL_1, // channel count (mono)
  sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // sample format: signed 16-bit little-endian
  encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // encoding type: raw PCM
}
let audioCapturerInfo: audio.AudioCapturerInfo = {
  source: audio.SourceType.SOURCE_TYPE_MIC, // audio source type: microphone
  capturerFlags: 0 // audio capturer flags
}
let audioCapturerOptions: audio.AudioCapturerOptions = {
  streamInfo: audioStreamInfo,
  capturerInfo: audioCapturerInfo
}
let timer = null // interval handle used to pace recognition; currently only cleared in onComplete

class MeetingRecording {
  bufferSize: number = 0 // total bytes of PCM captured and appended to the audio file
  endBufferSize: number = 0 // bytes already fed to the recognizer, so no captured data is skipped
  duration: number = 0 // elapsed recognition time; used to decide when recognition must be re-initialised
  sessionId: string = ''
  XSubjectToken: string = ''
  msg: string = '' // latest (intermediate or final) recognition result text
  audioFilePath: string = ''
  audioCapturer: audio.AudioCapturer
  asrEngine: speechRecognizer.SpeechRecognitionEngine
  sendMsg: (msg: string) => void; // callback used to deliver a completed recognition result

  private avRecorder: media.AVRecorder | undefined = undefined;
  private avProfile: media.AVRecorderProfile = {
    audioBitrate: 100000, // audio bitrate
    audioChannels: 1, // number of audio channels
    audioCodec: media.CodecMimeType.AUDIO_AAC, // audio codec; currently only AAC is supported
    audioSampleRate: 16000, // audio sampling rate
    fileFormat: media.ContainerFormatType.CFT_MPEG_4A, // container format; currently only m4a is supported
  };
  private avConfig: media.AVRecorderConfig = {
    audioSourceType: media.AudioSourceType.AUDIO_SOURCE_TYPE_MIC, // audio input source: microphone
    profile: this.avProfile,
    url: 'fd://35', // placeholder fd; replaced with the real file fd in startRecordingProcess
  };

  constructor(path, sessionId, XSubjectToken, sendMsg, init?) {
    this.sessionId = sessionId
    this.XSubjectToken = XSubjectToken
    // NOTE(review): the captured stream is raw PCM, not MP3 — the `.mp3`
    // extension is kept as-is so existing file paths/callers keep working.
    this.audioFilePath = path + `/${this.sessionId}.mp3`;
    this.sendMsg = sendMsg
    this.initAudioCapturer(init)
    // this.startRecordingProcess()
  }

  // Create the AudioCapturer, set up the recognition engine, and append every
  // captured PCM buffer to the audio file.
  initAudioCapturer (init) {
    audio.createAudioCapturer(audioCapturerOptions, (err, capturer) => { // create the AudioCapturer instance
      if (err) {
        promptAction.showToast({
          message: '初始化失败'
        })
        return;
      }
      this.audioCapturer = capturer;
      this.createByCallback()
      // Notify the caller that the capturer is ready.
      init && init()
      if (this.audioCapturer !== undefined) {
        (this.audioCapturer as audio.AudioCapturer).on('readData', async (buffer: ArrayBuffer) => {
          // Append the chunk at the current end of the file.
          let file: fs.File = fs.openSync(this.audioFilePath, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE);
          let options: Options = {
            offset: this.bufferSize,
            length: buffer.byteLength
          }
          try {
            fs.writeSync(file.fd, buffer, options);
            this.bufferSize += buffer.byteLength;
          } finally {
            // Close the fd on every callback; the previous version opened the
            // file per chunk without ever closing it, leaking one fd per buffer.
            fs.closeSync(file);
          }
        });
      }
    });
  }

  // Create the speech-recognition engine; on success register the listener
  // and start a listening session.
  createByCallback() {
    // Engine creation parameters
    let initParamsInfo: speechRecognizer.CreateEngineParams = {
      language: 'zh-CN',
      online: 1
    };

    speechRecognizer.createEngine(initParamsInfo, async (err: BusinessError, speechRecognitionEngine:
      speechRecognizer.SpeechRecognitionEngine) => {
      if (!err) {
        // Keep the engine instance for the rest of the session.
        this.asrEngine = speechRecognitionEngine;
        this.setListener()
        this.startListening()
      } else {
        // Error 1002200001: unsupported language/mode, init timeout, or missing resources
        // Error 1002200006: engine busy — usually several apps using the recognizer at once
        // Error 1002200008: engine is being destroyed
        promptAction.showToast({
          message: '语音识别有问题'
        })
      }
    });
  }

  // Start the meeting: begin audio capture and resume feeding the recognizer.
  async startMeeting () {
    // Capture may only be started from the PREPARED, PAUSED or STOPPED state.
    let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
    if (stateGroup.indexOf((this.audioCapturer as audio.AudioCapturer).state.valueOf()) === -1) {
      return;
    }
    // Start capturing
    (this.audioCapturer as audio.AudioCapturer).start((err: BusinessError) => {
      if (err) {
        promptAction.showToast({
          message: '启动失败'
        })
      } else {
        // Resume recognition from the last consumed offset.
        this.writeAudio(this.endBufferSize)
      }
    });
  }

  // Pause the meeting: stop audio capture.
  stop() {
    if (this.audioCapturer !== undefined) {
      // Capture may only be stopped from the RUNNING or PAUSED state.
      if ((this.audioCapturer as audio.AudioCapturer).state.valueOf() !== audio.AudioState.STATE_RUNNING && (this.audioCapturer as audio.AudioCapturer).state.valueOf() !== audio.AudioState.STATE_PAUSED) {
        return;
      }

      // Stop capturing
      (this.audioCapturer as audio.AudioCapturer).stop((err: BusinessError) => {
        if (err) {
          promptAction.showToast({
            message: '暂停失败'
          })
        }
      });
    }
  }

  // End the meeting: release the capturer, then finish the recognition session.
  async release () {
    if (this.endBufferSize < this.bufferSize) {
      // Some captured audio has not been recognized yet; the writeAudio loop
      // keeps draining it until it catches up.
    }
    if (this.audioCapturer !== undefined) {
      // release() is only valid when the capturer is NOT in RELEASED or NEW state.
      if ((this.audioCapturer as audio.AudioCapturer).state.valueOf() === audio.AudioState.STATE_RELEASED || (this.audioCapturer as audio.AudioCapturer).state.valueOf() === audio.AudioState.STATE_NEW) {
        console.info('Capturer already released');
        return;
      }

      // Release resources
      (this.audioCapturer as audio.AudioCapturer).release((err: BusinessError) => {
        if (err) {
          console.error('Capturer release failed.');
        } else {
          console.info('Capturer release success.');
          // Guarded like every other asrEngine call site, in case the engine
          // was never created (createEngine failed).
          this.asrEngine && this.asrEngine.finish(this.sessionId);
        }
      });
    }
  }


  // Register the speech-recognition listener callbacks.
  setListener () {
    let that = this
    let setListener: speechRecognizer.RecognitionListener = {
      // Called when recognition starts successfully
      onStart(sessionId: string, eventMessage: string) {
      },
      // Event callback
      onEvent(sessionId: string, eventCode: number, eventMessage: string) {
      },
      // Result callback — receives both intermediate and final results
      onResult(sessionId: string, eventMessage: speechRecognizer.SpeechRecognitionResult) {
        if (eventMessage.result) {
          that.msg = eventMessage.result
        }
      },
      // Called when one recognition session completes; deliver the text and
      // immediately start a new session so recognition is continuous.
      onComplete(sessionId: string, eventMessage: string) {
        that.duration = 0
        if (that.msg) {
          that.sendMsg(that.msg)
        }
        that.msg = ''
        clearInterval(timer)
        that.startListening()
      },
      // Error callback — the error code is reported here.
      // e.g. 1002200006: the recognition engine is busy (already recognizing).
      onError(sessionId: string, errorCode: number, errorMessage: string) {
        console.error("onError sessionId: " + sessionId + "errorCode: " + errorCode + "errorMessage: " + errorMessage, that.duration);
      },
    }
    // Attach the listener
    this.asrEngine.setListener(setListener);
  }

  // Start a recognition session (sessions are capped at maxAudioDuration,
  // then restarted from onComplete).
  startListening () {
    let extraParam: Record<string, Object> = {
      "recognitionMode": 0,
      "maxAudioDuration": 60000
    }
    let recognizerParams: speechRecognizer.StartParams = {
      sessionId: this.sessionId,
      audioInfo: { audioType: 'pcm', sampleRate: 16000, soundChannel: 1, sampleBit: 16 },
      extraParams: extraParam
    }
    this.asrEngine && this.asrEngine.startListening(recognizerParams);
  }

  // Feed the captured file to the recognizer in 640-byte PCM chunks,
  // starting at `offset`, catching up with capture as data arrives.
  async writeAudio (offset: number) {
    if (!this.asrEngine) {
      return
    }
    let stat = fs.statSync(this.audioFilePath);
    if (offset > stat.size + 640 || stat.size === 0) {
      // The requested chunk is not available yet. Retry on a timer instead of
      // recursing synchronously — the previous synchronous self-call with the
      // same offset recursed without bound and overflowed the stack.
      setTimeout(() => this.writeAudio(offset), 40);
      return
    }
    let file = fs.openSync(this.audioFilePath, fs.OpenMode.READ_WRITE);
    try {
      let arrayBuffer = new ArrayBuffer(640);
      fs.readSync(file.fd, arrayBuffer, {
        offset,
        length: arrayBuffer.byteLength
      });
      let uint8Array: Uint8Array = new Uint8Array(arrayBuffer);
      this.asrEngine && this.asrEngine.writeAudio(this.sessionId, uint8Array);
    } finally {
      // Always close the fd, even if read/writeAudio throws.
      fs.closeSync(file);
    }
    await countDownLatch(1)
    this.endBufferSize += 640
    this.writeAudio(this.endBufferSize)
  }



  // Register AVRecorder callbacks.
  setAudioRecorderCallback() {
    if (this.avRecorder != undefined) {
      // State-machine change callback
      this.avRecorder.on('stateChange', (state: media.AVRecorderState, reason: media.StateChangeReason) => {
      })
      // Error-reporting callback
      this.avRecorder.on('error', (err: BusinessError) => {
        console.error(`AudioRecorder failed, code is ${err.code}, message is ${err.message}`);
      })
    }
  }

  // Start the AVRecorder-based recording flow (alternative to AudioCapturer).
  async startRecordingProcess() {
    if (this.avRecorder != undefined) {
      await this.avRecorder.release();
      this.avRecorder = undefined;
    }
    // 1. Create the recorder instance
    this.avRecorder = await media.createAVRecorder();
    this.setAudioRecorderCallback();
    // 2. Obtain the recording file fd and assign it to avConfig.url
    //    (see the FilePicker / file-access development samples)
    let res = fs.accessSync(this.audioFilePath);
    if (res) {
      let stat = fs.statSync(this.audioFilePath);
      console.log('stat.size', stat.size)
    }
    let fdPath = 'fd://';
    let file = await fs.open(this.audioFilePath, fs.OpenMode.READ_WRITE | fs.OpenMode.CREATE);
    fdPath = fdPath + '' + file.fd;
    this.avConfig.url = fdPath
    // 3. Configure the recorder to complete preparation
    await this.avRecorder.prepare(this.avConfig);
    // 4. Start recording
    await this.avRecorder.start();
    if (res) {
      let stat = fs.statSync(this.audioFilePath);
      console.log('stat.size', stat.size)
    }
    this.createByCallback()
  }
}

export { MeetingRecording }