import Taro from '@tarojs/taro'
import api from '@API'

/**
 * Singleton audio helper: wraps a shared Taro InnerAudioContext for playback
 * and Baidu's speech-recognition REST API for audio-to-text transfer.
 */
const AudioUtil = {
  // Cached Baidu speech-recognition access token; '' means not fetched yet.
  baiduToken: '',
  // Lazily-created shared InnerAudioContext; undefined until first use.
  audioCtx: undefined,

  /**
   * Lazily create the shared InnerAudioContext.
   * @param {Function} [callback] - invoked when playback stops or ends naturally.
   */
  initAudioCtx(callback) {
    if (this.audioCtx) return
    this.audioCtx = Taro.createInnerAudioContext()
    // playAudio/stopAudio call this with no argument; registering undefined
    // as a listener can throw on some platforms, so guard the registration.
    if (typeof callback === 'function') {
      this.audioCtx.onStop(callback)
      this.audioCtx.onEnded(callback)
    }
  },

  /**
   * Play an audio source through the shared context.
   * @param {string} src - audio URL; no-op when falsy.
   */
  playAudio(src) {
    if (!src) return
    if (!this.audioCtx) {
      this.initAudioCtx()
    }
    this.audioCtx.src = src
    this.audioCtx.play()
  },

  /** Stop playback; no-op when no context has been created yet. */
  stopAudio() {
    // Creating a fresh context just to stop it is pointless — bail out instead.
    if (!this.audioCtx) return
    this.audioCtx.stop()
  },

  /** Stop playback and release the shared context and its native resources. */
  destroyAudio() {
    if (!this.audioCtx) return
    this.audioCtx.stop()
    // InnerAudioContext holds native resources; destroy() frees them on
    // platforms that expose it (the H5 implementation may not).
    this.audioCtx.destroy?.()
    this.audioCtx = undefined
  },

  /**
   * Fetch and cache a Baidu speech-AI access token.
   * Best-effort: on failure shows a toast and leaves baiduToken unchanged.
   */
  async getBaiduAITokenData() {
    try {
      const data = await api.requestBaiduToken()
      this.baiduToken = data.token
    } catch (error) {
      Taro.showToast({
        title: '获取语音AI凭证出错',
        icon: 'none',
      })
    }
  },

  /**
   * Download a remote audio file and run it through speech recognition.
   * @param {string} url - address of the audio file (assumed 16 kHz wav — TODO confirm).
   * @returns {Promise<string[]|string|undefined>} recognition result, or the
   *   '101' sentinel on any download/transfer failure (kept for existing callers).
   */
  async transferNetworkFile2Text(url) {
    try {
      const res = await Taro.request({
        url,
        method: 'GET',
        responseType: 'arraybuffer'
      })
      const base64Str = await Taro.arrayBufferToBase64(new Uint8Array(res.data))
      // Decoded byte count of the base64 payload: 3 bytes per 4 chars,
      // padding stripped first. Math.floor, not parseInt, on a number.
      const size = Math.floor(base64Str.replace(/=/g, '').length * 0.75)
      return await this.voiceTransfer(base64Str, size)
    } catch (error) {
      return '101'
    }
  },

  /**
   * Call Baidu's pro speech-recognition endpoint.
   * @param {string} base64Str - base64-encoded audio payload.
   * @param {number} size - decoded byte length of the audio.
   * @returns {Promise<string[]|undefined>} Baidu's `result` field, if any.
   */
  async voiceTransfer(base64Str, size) {
    if (!this.baiduToken) {
      await this.getBaiduAITokenData()
    }
    // Token fetch failed (a toast was already shown) — skip the request
    // rather than sending an unauthenticated one that cannot succeed.
    if (!this.baiduToken) return undefined
    const url = 'https://vop.baidu.com/pro_api';
    const params = {
      format: 'wav', // audio format: pcm / wav / amr / m4a
      rate: 16000, // sample rate, fixed at 16000
      dev_pid: 80001, // Mandarin (pro model)
      channel: 1, // mono only; fixed value 1
      token: this.baiduToken, // developer access_token from the open platform
      cuid: 'care_workshop', // unique client id, used for UV statistics
      len: size, // byte length of the raw audio
      speech: base64Str, // base64-encoded binary audio data
    };
    const res = await Taro.request({
      url,
      method: 'POST',
      header: {
        'Content-Type': 'application/json',
        Accept: 'application/json',
      },
      data: params
    })
    return res?.data?.result
  }
}

export default AudioUtil;
