<template>
  <view class="tf-speech-recognizer" @tap="show">
   <slot></slot>
   <!-- Mask overlay; tapping it closes the panel -->
   <view class="mask" @tap.stop="isShow = false" v-if="isShow" @touchmove.stop.prevent="moveHandle"></view>
   <!-- Recording panel (slides up from the bottom when isShow is true) -->
   <view class="conbox record" :class="{'pickerShow':isShow}" >
    <!-- Elapsed-time display; a countdown can be added here as needed -->
    <view class="time"> 
      {{showRecordTime}}
    </view>
    <!-- Hint text before recording, recognition result afterwards -->
    <view class="c999">{{ displayContent }}</view>
    <view class="record-box">
      <!-- Cancel button: only rendered once a recording exists -->
      <span v-if="voicePath" class="cancel" @tap.stop="isShow = false"></span>
      <!-- Long-press to record; the progress ring is drawn on this canvas -->
      <canvas class="canvas" canvas-id="canvas" @touchstart="start" @longpress="record" @touchend="end" @touchmove.stop.prevent="moveHandle">
        <span class="recording"></span>
      </canvas>
      <!-- Confirm button: emits the recognized text via okClick -->
      <span v-show="voicePath" class="confirm" @tap.stop="okClick"></span>
    </view>
    <view class="c666 fz32 domess">长按录音</view>
   </view>
  </view>
</template>
<script>
  import permision from '@/js_sdk/wa-permission/permission.js'
  export default {
    name: 'tf-speech-recognizer',
    props: {
      // Minimum recording duration in seconds; shorter recordings are discarded
      minTime: {
        type: Number,
        default: 1
      },
      // Maximum recording duration in seconds; recording auto-stops at this point
      maxTime: {
        type: Number,
        default: 10
      }
    },
    data() {
      return {
        // Whether the recording panel (and mask) is visible
        isShow: false,
        // RecorderManager instance, created lazily in show()
        recorderManager: null,
        // Elapsed recording time in milliseconds
        recordTime: 0,
        // setInterval id driving the canvas ring animation; null when idle
        drawTimer: null,
        // Temp file path of the last finished recording
        voicePath: '',
        // Text produced by the speech-recognition request
        recognitionResult: '',
        // When true, the next recorder onStop result is thrown away
        // (set when a recording is shorter than minTime)
        discardRecording: false
      }
    },
    computed: {
      // Hint text before a recording exists; recognition result afterwards
      displayContent() {
        if (!this.voicePath) return `最短${this.minTime}秒，最长${this.maxTime}秒`
        return this.recognitionResult || '未识别出结果'
      },
      // Recording time formatted as mm:ss
      showRecordTime() {
        const mm = Math.floor(this.recordTime / 60000).toString().padStart(2, '0')
        const ss = Math.floor((this.recordTime % 60000) / 1000).toString().padStart(2, '0')
        return `${mm}:${ss}`
      }
    },
    methods: {
      // Swallow touchmove so the page behind the mask does not scroll
      moveHandle() {
        return false
      },
      // Measure the canvas and the center icon to size the progress ring.
      // Results are kept as non-reactive instance fields (tempw/temph/tempw1):
      // they are only read by the drawing code and need no reactivity.
      initValue() {
        const canvasQuery = uni.createSelectorQuery()
        canvasQuery.select('.canvas').boundingClientRect()
        canvasQuery.exec(res => {
          // Guard: the node may not be rendered yet
          if (res && res[0]) {
            this.tempw = res[0].width
            this.temph = res[0].height
          }
        })
        // Ring size is derived from the center image's width
        const recordingQuery = uni.createSelectorQuery()
        recordingQuery.select(".recording").boundingClientRect()
        recordingQuery.exec(res => {
          if (res && res[0]) {
            this.tempw1 = res[0].width
          }
        })
      },
      // Open the panel: request the microphone permission, reset state and
      // lazily create the recorder manager.
      async show() {
        const result = await permision.requestAndroidPermission('android.permission.RECORD_AUDIO')
        if (result !== 1) {
          uni.showModal({
            title: '温馨提示',
            content: '语音识别功能需要您开启麦克风权限才能使用，请开启麦克风使用权限',
            showCancel: false,
            confirmText: '好的'
          })
          return
        }
        this.recognitionResult = ''
        this.voicePath = ''
        this.discardRecording = false
        this.recorderManager = uni.getRecorderManager()
        // Fires both when stop() is called and when the max duration elapses
        this.recorderManager.onStop(res => {
          if (this.discardRecording) {
            // Recording was shorter than minTime — drop the file silently
            this.discardRecording = false
            return
          }
          this.voicePath = res.tempFilePath
          this.loadFileData(res.tempFilePath)
        })
        // Defer so the panel is rendered before initValue() measures it
        setTimeout(() => {
          this.isShow = true
          this.initValue()
        }, 100)
      },
      // Confirm: emit the recognized text and close the panel
      okClick() {
        this.$emit('okClick', this.recognitionResult)
        this.isShow = false
      },

      // touchstart: reset state and create the canvas drawing context
      start() {
        this.voicePath = ''
        this.recordTime = 0
        this.canvasObj = uni.createCanvasContext('canvas')
      },
      // touchend: stop the ring animation and the recorder.
      // Recordings shorter than minTime are discarded via discardRecording
      // instead of being sent for recognition.
      end() {
        if (!this.drawTimer) return
        const recordTime = this.recordTime
        // Stop the animation timer
        clearInterval(this.drawTimer)
        this.drawTimer = null
        // Reset the clock
        this.recordTime = 0
        // Wipe the canvas. A CanvasContext has no width/height of its own,
        // so use the canvas size measured in initValue().
        this.canvasObj.clearRect(0, 0, this.tempw, this.temph)
        this.canvasObj.draw()
        if (recordTime < this.minTime * 1000) {
          // Too short: still stop the recorder (otherwise it keeps recording
          // until the max duration and onStop processes the file anyway),
          // but flag the result to be discarded.
          this.discardRecording = true
          this.recorderManager.stop()
          uni.showToast({ icon: 'none', title: '时间太短，请重新录制', duration: 2000 })
          return
        }
        this.recorderManager.stop()
      },
      // longpress: start recording and animate the progress ring
      record() {
        const maxTimems = this.maxTime * 1000
        // Record ~2s longer than requested: the interval callback also draws,
        // which stretches the measured elapsed time
        this.recorderManager.start({ duration: maxTimems + 2000 })
        // Ring geometry
        const centerX = this.tempw / 2 + 1
        const centerY = this.temph / 2 + 1
        // Ring radius = half the center image width + 4
        const ringRadius = this.tempw1 / 2 + 4
        // Faint full circle as the ring's background track
        this.canvasObj.beginPath()
        this.canvasObj.setStrokeStyle("#fe3b54")
        this.canvasObj.setGlobalAlpha(0.3)
        this.canvasObj.setLineWidth(3)
        this.canvasObj.arc(centerX, centerY, ringRadius, 0, 2 * Math.PI)
        this.canvasObj.stroke()
        this.canvasObj.draw()
        // Animated progress arc
        const frame = 40
        const timeGap = 1000 / frame
        const scale = 2 * Math.PI / (this.maxTime * frame)
        const sAngle = -0.5 * Math.PI
        let eAngle = sAngle
        this.drawTimer = setInterval(() => {
          // Advance the clock
          this.recordTime += timeGap
          // >= rather than ===: exact equality on an accumulated value is
          // fragile — one skipped tick and recording would never auto-stop
          if (this.recordTime >= maxTimems) {
            this.end()
            return
          }
          // Draw the next arc segment
          this.canvasObj.beginPath()
          this.canvasObj.setStrokeStyle("#fe3b54")
          this.canvasObj.setGlobalAlpha(1)
          this.canvasObj.setLineWidth(3)
          this.canvasObj.arc(centerX, centerY, ringRadius, sAngle, (eAngle += scale), false)
          this.canvasObj.stroke()
          this.canvasObj.draw(true)
        }, timeGap - 1) // subtract ~1ms for the callback's own cost
      },

      // Read the recorded file (App/plus runtime only), Base64-encode it
      // and run speech recognition on the payload.
      loadFileData(recordPath) {
        plus.io.requestFileSystem(plus.io.PRIVATE_DOC, fs => {
          fs.root.getFile(recordPath, {
            create: false
          }, fileEntry => {
            fileEntry.file(file => {
              const fileReader = new plus.io.FileReader()
              fileReader.onloadend = evt => {
                // Keep only the data part of the Data URL
                // https://developer.mozilla.org/zh-CN/docs/Web/HTTP/Basics_of_HTTP/Data_URLs
                const content = evt.target.result.split(',')[1]
                // NOTE(review): this.$globalData.userInfo._PK_ is a project
                // global used as the Baidu cuid — confirm it is always set
                const action = this.speechRecognizing(content, file.size, this.$globalData.userInfo._PK_)
                // Run recognition; failures are surfaced inside speechRecognizing
                action.then(result => this.recognitionResult = result).catch(() => {})
              }
              fileReader.readAsDataURL(file)
            }, fileError => {
              console.log('FileError: ', fileError)
              uni.showToast({ icon: 'none', title: fileError.message })
            })
          }, getFileError => {
            console.log('GetFileError: ', getFileError)
            uni.showToast({ icon: 'none', title: getFileError.message })
          })
        }, requestFileSystemError => {
          console.log('RequestFileSystemError: ', requestFileSystemError)
          uni.showToast({ icon: 'none', title: requestFileSystemError.message })
        })
      },

      /**
       * Baidu speech-to-text wrapper.
       * @param {String} speech Base64-encoded audio data
       * @param {Number} len Audio file size in bytes (NOT the Base64 string length)
       * @param {String} cuid Unique user id, used by Baidu for UV statistics
       * @param {Boolean} [showLoading] Show a loading indicator while recognizing
       * @param {Boolean} [showErrorToast] Show a toast on failure
       * @returns {Promise<String>} Resolves with the recognized text
       */
      speechRecognizing(speech, len, cuid, showLoading=true, showErrorToast=true) {
        return new Promise((resolve, reject) => {
          const errorHandler = (message, no = '') => {
            if (no) console.log(`${no}: ${message}`)
            showLoading && uni.hideLoading()
            showErrorToast && uni.showToast({ icon: 'none', title: message, duration: 2000 })
            reject(message)
          }
          showLoading && uni.showLoading({ title: '正在识别' })
          // Fetch a fresh access token
          // https://ai.baidu.com/ai-doc/REFERENCE/Ck3dwjhhu
          uni.request({
            url: 'https://aip.baidubce.com/oauth/2.0/token',
            data: {
              grant_type: 'client_credentials',
              // TODO: fill in your Baidu app credentials
              client_id: 'API Key',
              client_secret: 'Secret Key'
            },
            success: result => {
              if (result.statusCode !== 200) {
                errorHandler(`【${result.statusCode}】${result.errMsg}`, 'F11')
                return
              }
              if (result.data.error_description) {
                errorHandler(result.data.error_description, 'F12')
                return
              }
              const token = result.data.access_token
              // Upload the audio and recognize
              // https://cloud.baidu.com/doc/SPEECH/s/Vk38lxily#json方式上传音频
              uni.request({
                method: 'POST',
                // https, not http: the endpoint supports TLS and several
                // uni-app targets reject cleartext requests
                url: 'https://vop.baidu.com/server_api',
                header: { 'Content-Type': 'application/json' },
                data: {
                  format: 'm4a',
                  rate: 16000,
                  channel: 1,
                  dev_pid: 1537,
                  token: token,
                  cuid: cuid,
                  len: len,
                  speech: speech
                },
                success: res => {
                  // Only hide the loading indicator we actually showed
                  showLoading && uni.hideLoading()
                  if (res.statusCode !== 200) {
                    errorHandler(`【${res.statusCode}】${res.errMsg}`, 'F2')
                    return
                  }
                  if (res.data.err_no === 0) {
                    // Strip the trailing Chinese full stop Baidu appends
                    const result = res.data.result[0].replace('。', '')
                    resolve(result)
                  } else {
                    errorHandler(`【${res.data.err_no}】${res.data.err_msg}`, 'F3')
                  }
                },
                fail: err => errorHandler(err.errMsg, 'F1')
              })
            },
            fail: error => errorHandler(error.errMsg, 'F10')
          })
        })
      }

    }
  }
</script>

<style lang="scss">
  .tf-speech-recognizer {
    // Full-screen dimmed backdrop shown while the panel is open
    .mask {
      position: fixed;
      z-index: 1000;
      top: 0;
      right: 0;
      left: 0;
      bottom: 0;
      background: rgba(0, 0, 0, 0.5);
    }
    // Bottom sheet: translated off-screen by default, slides up when
    // .pickerShow is applied
    .conbox {
      transition: all .3s ease;
      transform: translateY(100%);
      &.pickerShow {
        transform:translateY(0);
      } 
      position: fixed;
      z-index: 1000;
      right: 0;
      left: 0;
      bottom: 0;
      background: #fff;
    }
    // Text color / size utility classes
    .c666 { color: #666; }
    .c999 { color: #999; }
    .fz28 { font-size: 14px; }
    .fz32 { font-size: 16px; }
    
    
    .record {
      text-align: center;
      // mm:ss elapsed-time readout
      .time {
        text-align: center;
        font-size: 30px;
        color: #000;
        line-height: 50px;
        margin-top: 25px;
      }
      .domess { margin-bottom: 25px; }
      
      
      // Row holding the cancel button, the canvas ring and the confirm button
      .record-box {
        display: flex;
        flex-direction: row;
        justify-content: center;
      }
      canvas {
        margin: 5px 30px;
        position: relative;
        width: 100px;
        height: 100px;
        z-index: 10;			
        // Round microphone icon centered inside the progress-ring canvas
        .recording {
          position: absolute;
          top: 10px;
          left: 10px;
          width: 80px;
          height: 80px;
          border: 1px dashed #fe3b54;
          border-radius: 50px;
          background: #fe3b54 url(../../static/tf-speech-recognizer/recording.png) no-repeat 50% 50%;
          background-size: 50% 50%;
          z-index: 100;
        }
      }
      // Shared sizing for the round cancel/confirm buttons
      .btncom{
        margin-top: 35px;
        width: 40px;
        height: 40px;
        border-radius: 40px;
      }
      .cancel {
        @extend .btncom;
        background:url(../../static/tf-speech-recognizer/cancel.png) no-repeat;
        background-size: 100% 100%;
      }			
      .confirm {
        @extend .btncom;
        background:url(../../static/tf-speech-recognizer/confirm.png) no-repeat 100% 100%;
        background-size: 100% 100%;
      }
    } 
  }
</style>