import React, { Component } from 'react'
import { AudioOutlined, createFromIconfontCN } from '@ant-design/icons';
import { Button, message } from 'antd';
import CryptoJS from 'crypto-js'
import unAudio from '@/assets/unAudio.png'
import styles from './index.css'
import theStyles from '../style.less'
import Draggable from "react-draggable";
// Icon set served from the project's iconfont.cn bundle (provides the
// "icon-stopcircle" glyph used while recording).
const IconFont = createFromIconfontCN({
  scriptUrl: '//at.alicdn.com/t/font_3273667_902xq4xgi45.js',
});

// APPID / API_SECRET / API_KEY come from the iFlytek console
// ("My Apps" -> "Voice dictation (streaming)") and are injected on window.
const APPID = window.APPID
const API_SECRET = window.API_SECRET
const API_KEY = window.API_KEY

/**
 * Build the authenticated websocket URL for the iFlytek IAT service.
 *
 * NOTE(review): signing should really happen on the backend so API_SECRET is
 * never shipped to the browser; it is done here for demo convenience (as the
 * original comment acknowledged).
 *
 * @returns {Promise<string>} resolves with the signed wss:// URL
 */
function getWebSocketUrl() {
  const url = 'wss://iat-api.xfyun.cn/v2/iat'
  const host = 'iat-api.xfyun.cn'
  // BUGFIX: Date.prototype.toGMTString() is deprecated; toUTCString()
  // returns the exact same RFC-1123 string.
  const date = new Date().toUTCString()
  const algorithm = 'hmac-sha256'
  const headers = 'host date request-line'
  // The string that gets HMAC-signed, per the iFlytek handshake spec.
  const signatureOrigin = `host: ${host}\ndate: ${date}\nGET /v2/iat HTTP/1.1`
  const signatureSha = CryptoJS.HmacSHA256(signatureOrigin, API_SECRET)
  const signature = CryptoJS.enc.Base64.stringify(signatureSha)
  const authorizationOrigin = `api_key="${API_KEY}", algorithm="${algorithm}", headers="${headers}", signature="${signature}"`
  const authorization = btoa(authorizationOrigin)
  // BUGFIX: the RFC-1123 date contains spaces and a comma and must be
  // percent-encoded before being embedded in a query string.
  // (Kept as a Promise so existing `.then(url => ...)` callers still work.)
  return Promise.resolve(
    `${url}?authorization=${authorization}&date=${encodeURIComponent(date)}&host=${host}`
  )
}

let transAudioData = {
  /**
   * Downsample a 44.1 kHz buffer to 16 kHz via linear interpolation.
   * @param {Float32Array|ArrayBuffer|number[]} audioData samples at 44100 Hz
   * @returns {Float32Array} resampled samples at 16000 Hz
   */
  to16kHz(audioData) {
    const data = new Float32Array(audioData)
    const fitCount = Math.round(data.length * (16000 / 44100))
    const newData = new Float32Array(fitCount)
    const springFactor = (data.length - 1) / (fitCount - 1)
    newData[0] = data[0]
    for (let i = 1; i < fitCount - 1; i++) {
      const tmp = i * springFactor
      // BUGFIX (idiom): the original used Math.floor(tmp).toFixed(), which
      // yields *string* indices that only worked through implicit coercion.
      const before = Math.floor(tmp)
      const after = Math.ceil(tmp)
      const atPoint = tmp - before
      newData[i] = data[before] + (data[after] - data[before]) * atPoint
    }
    newData[fitCount - 1] = data[data.length - 1]
    return newData
  },
  /**
   * Convert float samples in [-1, 1] to little-endian signed 16-bit PCM.
   * @param {Float32Array|number[]} input float samples
   * @returns {DataView} view over the packed PCM bytes
   */
  to16BitPCM(input) {
    const dataLength = input.length * (16 / 8)
    const dataBuffer = new ArrayBuffer(dataLength)
    const dataView = new DataView(dataBuffer)
    let offset = 0
    for (let i = 0; i < input.length; i++, offset += 2) {
      // Clamp, then scale asymmetrically so -1 maps to -32768 and +1 to 32767.
      const s = Math.max(-1, Math.min(1, input[i]))
      dataView.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7fff, true)
    }
    return dataView
  },
}

class IatRecorder {
  /**
   * Streams microphone audio to the iFlytek IAT (streaming dictation)
   * websocket API and accumulates the recognized text.
   *
   * Status values: 'null' (idle), 'init' (socket opening), 'ing'
   * (recording/recognizing), 'end' (stopped), 'unAudio' (no microphone).
   *
   * Optional observer hooks callers may assign on an instance:
   *   onWillStatusChange(oldStatus, newStatus)
   *   onTextChange(text)
   *
   * @param {Object} [options]
   * @param {string} [options.language='zh_cn'] dictation language
   * @param {string} [options.accent='mandarin'] dialect / accent
   * @param {string} [options.appId] iFlytek app id, defaults to window.APPID
   */
  constructor({ language, accent, appId } = {}) {
    this.status = 'null'
    this.language = language || 'zh_cn'
    this.accent = accent || 'mandarin'
    this.appId = appId || APPID
    // Converted 16 kHz / 16-bit PCM bytes buffered for the websocket.
    this.audioData = []
    // Final dictation text.
    this.resultText = ''
    // Intermediate text needed while dynamic correction (wpgs) is enabled.
    this.resultTextTemp = ''
  }
  // Update the recorder status, notifying the optional observer first.
  setStatus(status) {
    this.onWillStatusChange && this.status !== status && this.onWillStatusChange(this.status, status)
    this.status = status
  }
  // Update the recognized text and/or its wpgs intermediate form,
  // notifying the optional observer.
  setResultText({ resultText, resultTextTemp } = {}) {
    this.onTextChange && this.onTextChange(resultTextTemp || resultText || '')
    resultText !== undefined && (this.resultText = resultText)
    resultTextTemp !== undefined && (this.resultTextTemp = resultTextTemp)
  }
  // Change dictation parameters (language / accent).
  setParams({ language, accent } = {}) {
    language && (this.language = language)
    accent && (this.accent = accent)
  }
  // Open the websocket connection and wire up its event handlers.
  connectWebSocket() {
    return getWebSocketUrl().then(url => {
      let iatWS
      if ('WebSocket' in window) {
        iatWS = new WebSocket(url)
      } else if ('MozWebSocket' in window) {
        // BUGFIX: MozWebSocket only exists as a window property; the bare
        // identifier used originally would throw a ReferenceError.
        iatWS = new window.MozWebSocket(url)
      } else {
        message.error('浏览器不支持WebSocket');
        return
      }
      this.webSocket = iatWS
      this.setStatus('init')
      iatWS.onopen = e => {
        this.setStatus('ing')
        // Give the recorder a moment to buffer audio before the first frame.
        setTimeout(() => {
          this.webSocketSend()
        }, 500)
      }
      iatWS.onmessage = e => {
        this.result(e.data)
      }
      iatWS.onerror = e => {
        this.recorderStop()
      }
      iatWS.onclose = e => {
        this.recorderStop()
      }
    })
  }
  // Append a chunk of converted PCM bytes to the send buffer.
  postMessageExecute(output) {
    this.audioData.push(...output)
  }
  // Initialize browser recording: create the AudioContext, request
  // microphone permission, and wire the capture pipeline.
  recorderInit() {
    // Legacy getUserMedia shim for older browsers.
    navigator.getUserMedia =
      navigator.getUserMedia ||
      navigator.webkitGetUserMedia ||
      navigator.mozGetUserMedia ||
      navigator.msGetUserMedia

    // Create the audio environment.
    try {
      this.audioContext = new (window.AudioContext || window.webkitAudioContext)()
      this.audioContext.resume()
      if (!this.audioContext) {
        message.error('浏览器不支持webAudioApi相关接口')
        return
      }
    } catch (e) {
      if (!this.audioContext) {
        message.error('浏览器不支持webAudioApi相关接口')
        return
      }
    }

    // Request microphone permission (modern API first, legacy fallback).
    if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
      navigator.mediaDevices
        .getUserMedia({
          audio: true,
          video: false,
        })
        .then(stream => {
          getMediaSuccess(stream)
        })
        .catch(e => {
          getMediaFail(e)
        })
    } else if (navigator.getUserMedia) {
      navigator.getUserMedia(
        {
          audio: true,
          video: false,
        },
        stream => {
          getMediaSuccess(stream)
        },
        function(e) {
          getMediaFail(e)
        }
      )
    } else {
      if (navigator.userAgent.toLowerCase().match(/chrome/) && location.origin.indexOf('https://') < 0) {
        message.error('chrome下获取浏览器录音功能，因为安全性问题，需要在localhost或127.0.0.1或https下才能获取权限')
      } else {
        message.error('无法获取浏览器录音功能，请升级浏览器或使用chrome')
      }
      this.audioContext && this.audioContext.close()
      return
    }
    // Permission granted: build the capture graph and open the websocket.
    // (Declared after use above; safe because the callbacks fire async.)
    let getMediaSuccess = stream => {
      // ScriptProcessorNode lets JavaScript read raw samples directly.
      this.scriptProcessor = this.audioContext.createScriptProcessor(0, 1, 1)
      this.scriptProcessor.onaudioprocess = e => {
        // Convert each captured buffer only while dictation is running.
        if (this.status === 'ing') {
          let output = transAudioData.to16kHz(e.inputBuffer.getChannelData(0))
          output = transAudioData.to16BitPCM(output)
          output = Array.from(new Uint8Array(output.buffer))
          this.postMessageExecute(output)
        }
      }
      // Route the microphone MediaStream through the processor node.
      this.mediaSource = this.audioContext.createMediaStreamSource(stream)
      this.mediaSource.connect(this.scriptProcessor)
      this.scriptProcessor.connect(this.audioContext.destination)
      this.connectWebSocket()
    }

    // Permission denied / no microphone: tear everything down.
    let getMediaFail = (e) => {
      this.setStatus('unAudio')
      this.audioContext && this.audioContext.close()
      this.audioContext = undefined
      // Close the websocket if it is already open.
      if (this.webSocket && this.webSocket.readyState === 1) {
        this.webSocket.close()
      }
    }
  }
  // Start (or resume) recording.
  recorderStart() {
    if (!this.audioContext) {
      this.recorderInit()
    } else {
      this.audioContext.resume()
      this.connectWebSocket()
    }
  }
  // Stop recording and close the websocket.
  recorderStop() {
    // Safari yields silent audio after suspend()+resume(), so skip suspend
    // there. Chrome's UA also contains "Safari", hence the extra check.
    // BUGFIX: the original misspelled "navigator.userAgen", which made the
    // Chrome half of the test always false.
    if (!(/Safari/.test(navigator.userAgent) && !/Chrome/.test(navigator.userAgent))) {
      this.audioContext && this.audioContext.suspend()
    }
    this.webSocket && this.webSocket.close()
    this.setStatus('end')
  }
  // Base64-encode a byte buffer for the websocket payload.
  toBase64(buffer) {
    var binary = ''
    var bytes = new Uint8Array(buffer)
    var len = bytes.byteLength
    for (var i = 0; i < len; i++) {
      binary += String.fromCharCode(bytes[i])
    }
    return window.btoa(binary)
  }
  // Send the first audio frame, then keep draining the buffer every 40 ms.
  webSocketSend() {
    if (this.webSocket.readyState !== 1) {
      return
    }
    let audioData = this.audioData.splice(0, 1280)
    var params = {
      common: {
        app_id: this.appId,
      },
      business: {
        language: this.language, // extra languages can be enabled in the iFlytek console
        domain: 'iat',
        accent: this.accent, // Chinese dialects can be enabled in the iFlytek console
        vad_eos: 5000,
        dwa: 'wpgs', // dynamic correction; must be enabled (free) in the console
      },
      data: {
        status: 0, // 0 = first frame
        format: 'audio/L16;rate=16000',
        encoding: 'raw',
        audio: this.toBase64(audioData),
      },
    }
    this.webSocket.send(JSON.stringify(params))
    this.handlerInterval = setInterval(() => {
      // Websocket no longer connected: drop buffered audio and stop.
      if (this.webSocket.readyState !== 1) {
        this.audioData = []
        clearInterval(this.handlerInterval)
        return
      }
      if (this.audioData.length === 0) {
        if (this.status === 'end') {
          // Last frame (status 2) tells the server the utterance is over.
          this.webSocket.send(
            JSON.stringify({
              data: {
                status: 2,
                format: 'audio/L16;rate=16000',
                encoding: 'raw',
                audio: '',
              },
            })
          )
          this.audioData = []
          clearInterval(this.handlerInterval)
        }
        return false
      }
      audioData = this.audioData.splice(0, 1280)
      // Intermediate frame (status 1).
      this.webSocket.send(
        JSON.stringify({
          data: {
            status: 1,
            format: 'audio/L16;rate=16000',
            encoding: 'raw',
            audio: this.toBase64(audioData),
          },
        })
      )
    }, 40)
  }
  // Handle a recognition result message from the server.
  result(resultData) {
    let jsonData = JSON.parse(resultData)
    if (jsonData.data && jsonData.data.result) {
      let data = jsonData.data.result
      let str = ''
      let ws = data.ws
      for (let i = 0; i < ws.length; i++) {
        str = str + ws[i].cw[0].w
      }
      // `pgs` is present when dynamic correction (wpgs) is on:
      // "apd" appends this piece to the final result; "rpl" replaces part
      // of the previous result (range given by the rg field).
      if (data.pgs) {
        if (data.pgs === 'apd') {
          // Promote the intermediate text to the final result.
          this.setResultText({
            resultText: this.resultTextTemp,
          })
        }
        // Stash the current hypothesis in resultTextTemp.
        this.setResultText({
          resultTextTemp: this.resultText + str,
        })
      } else {
        this.setResultText({
          resultText: this.resultText + str,
        })
      }
    }
    // BUGFIX: guard `jsonData.data` — error responses may omit it entirely.
    if (jsonData.code === 0 && jsonData.data && jsonData.data.status === 2) {
      // Server signalled the end of recognition.
      this.webSocket.close()
    }
    if (jsonData.code !== 0) {
      // Server reported an error; terminate the session.
      this.webSocket.close()
    }
  }
  // Public API: begin a fresh dictation session.
  start() {
    this.recorderStart()
    this.setResultText({ resultText: '', resultTextTemp: '' })
  }
  // Public API: stop the current dictation session.
  stop() {
    this.recorderStop()
  }
}
// Module-level singleton recorder shared by the FloatWindow component below.
let iatRecorder = new IatRecorder()

class FloatWindow extends Component {
  constructor(props) {
    super(props);
    this.state = {
      // audioModalVisible:true, // 语音识别弹框
      resultText:iatRecorder.resultText,
      activeDrags: 0,
      status: 'null',
    };
  }


  componentDidMount() {
    // this.props.onRef(this)
    this.props.onRef(this)
    window.addEventListener('keypress',this.keypressHandler)

  }
  componentWillUnmount(){
    window.removeEventListener('keypress',this.keypressHandler);
  }
  keypressHandler=(e)=>{
      // console.log(e.keyCode)
    const { effectAudio } = this.props;
    if (!effectAudio){
      return
    }
      if (e.keyCode === 32 && (iatRecorder.status ==='end' || iatRecorder.status ==='null')){
        // console.log('stop',iatRecorder.status)
        this.onClickAudioStart()
      }else if (e.keyCode === 32 && iatRecorder.status === 'ing'){
        // console.log('stop',iatRecorder.status)
        this.onClickAudioStop()
      }
  }
  onCancelAudio(){
    this.setState({resultText:''})
    // clearInterval(setStatus)
    iatRecorder.stop()
  }

  componentWillReceiveProps(nextProps) {
  }

  audioModalOnCancel(){
    this.props.cancelCallback()
  }

  onClickAudioStop(){
    iatRecorder.stop()
    // this.setState({status:'end'})
    // this.doMark()
    // clearInterval(setStatus)
  }
  onClickRestart(){
    iatRecorder.setStatus('end')
    this.props.effectAudioCallback()
  }
  onClickAudioStart(){
    iatRecorder.start()
    this.props.effectAudioCallback()
    let setResultText = setInterval(() => {
      if (iatRecorder.resultText !== this.state.resultText){
        this.setState({
          resultText: iatRecorder.resultText
        })
      }
    },500)
    let setStatus = setInterval(() => {
      // console.log('iatRecorder.status',iatRecorder.status,this.state.status)
      if (iatRecorder.status === 'end' && this.state.status ==='ing'){
        // console.log('doMarkiatRecorder')
        this.doMark()
        this.setState({
          status: iatRecorder.status,
        })
        clearInterval(setStatus)
        clearInterval(setResultText)
      }
      if (iatRecorder.status !== this.state.status){
        this.setState({
          status: iatRecorder.status
        })
      }
    },500)
  }
  doMark(){
    // console.log(iatRecorder)
    this.setState({resultText:''})
    let reg = /[\。\，]/g;// 替换字符串中的标点符号
    // audioContent = audioContent.replaceAll(reg,"")
    this.props.audioMarkCallback(iatRecorder.resultText.replaceAll('。',""))
  }

  clickResultText(){
    // console.log('(iatRecorder.resultText)',iatRecorder.resultText)
  }
  onDragFun(e){

  }

  onStart = () => {
    this.setState({activeDrags: ++this.state.activeDrags});
  };

  onStop = () => {
    this.setState({activeDrags: --this.state.activeDrags});
  };
  render() {
    const { audioModalVisible,effectAudio } = this.props;
    const { resultText,status } = this.state;
    const dragHandlers = {onStart: this.onStart, onStop: this.onStop};
    let reg = /[\。\，]/g;
    return <>

      {audioModalVisible && <Draggable {...dragHandlers} >
        <div className={styles.floatWindow} onDrag={this.onDragFun.bind(this)}>
          <div>语音标注</div>
          <div style={{textAlign: 'center'}}>
            {(!effectAudio || iatRecorder.status ==='unAudio') ? <img src={unAudio}/> :
              (status ==='end' || status ==='null') ? <AudioOutlined
                style={{fontSize: 60, color: '#fff', borderRadius: 60, backgroundColor: '#29CCB1', padding: 10}}
                onClick={this.onClickAudioStart.bind(this)} /> :
                <div style={{color: '#29CCB1',display:'flex'}}>
                  <div style={{margin:'auto',width:130,marginRight:-7}}><span  style={{float:'right',fontSize:27}} className={theStyles.ballon}>-----------</span></div>
                  <div>
                    <IconFont
                      style={{fontSize: 60, color: '#29CCB1', borderRadius: 60, padding: 10}} theme="outlined" type="icon-stopcircle"
                      onClick={this.onClickAudioStop.bind(this)}
                    />
                  </div>
                  <div style={{margin:'auto',width:130,marginLeft:-7}}><span  style={{float:'left',fontSize:27}} className={theStyles.ballon}>-----------</span></div>
                </div>

            }

            {/*<div style={{marginTop:10}}>{*/}
            {/*  (status ==='end' || status ==='null') && !resultText && '按（空格键）或点击按钮开始/结束说话'*/}

            {/*}</div>*/}
            <div style={{marginTop:10}}>{!effectAudio ? '没有检测到有效声音，请再试一次' : iatRecorder.status ==='unAudio'? '未检测到麦克风' : status ==='ing' ? !resultText ? '请开始说话': resultText.replaceAll('。',"").replace(/井号/g,"#").replaceAll("，","") : '按（空格键）或点击按钮开始/结束说话'}</div>
            {(!effectAudio || iatRecorder.status ==='unAudio') && <Button style={{backgroundColor:'#D6DAE0',border:'#D6DAE0',color:'#000000', marginTop:10}} type="primary" onClick={this.onClickRestart.bind(this)}>重新开始</Button>}
          </div>
          {/*<Button type="primary" onClick={this.doMark.bind(this)}>确定</Button>*/}
        </div>
      </Draggable>}
    </>;
  }
}

export default FloatWindow
