import React, { useEffect, useState, useRef } from 'react'
import { Button, View, Image, Input, Form } from '@tarojs/components'
import Taro from '@tarojs/taro'
import styles from './index.module.scss'
import { onAppHide, requirePlugin } from "@tarojs/taro";

// Load the WechatSI (speech recognition) mini-program plugin.
const plugin = requirePlugin("WechatSI");
// const manager = plugin.getRecordRecognitionManager();
/**
 * Chat input bar for the customer-service screen.
 *
 * Two modes, toggled by the leading icon:
 *  - text mode: an <Input> plus a "发送" button that forwards the typed text
 *    to the parent via `props.emitDataList`;
 *  - voice mode: a hold-to-talk button that records audio (Taro recorder)
 *    and runs speech recognition (WechatSI) at the same time.
 *
 * Parent contract (unchanged):
 *  - props.emitDataList(item) — appends a message item to the chat list;
 *  - props.changeHeight(px)   — tells the parent how far the keyboard pushes
 *    the layout up (0 = keyboard hidden).
 */
export default function CustomerService(props) {
  // Label on the hold-to-talk button; swapped while the finger is down.
  const [text, setText] = useState('按住 说话')
  // Recording session state: start/end timestamps (ms), duration in seconds
  // (`shijian`), bubble width, and the recording flag (`luStatu`).
  const [config, setConfig] = useState<any>({})
  const [iptValue, setIptValue] = useState<string>('')
  // Taro recorder (captures the mp3 file) and WechatSI recognizer (speech-to-text).
  const recorderManager = useRef<any>(Taro.getRecorderManager())
  const manager = useRef<any>(plugin.getRecordRecognitionManager())
  // true = text-input mode, false = hold-to-talk mode.
  const flag = useRef<boolean>(true)
  // Icon for the mode toggle (voice icon in text mode, keyboard icon in voice mode).
  const [imgUrl, setImgUrl] = useState<any>(
    'http://111.34.82.98:10001/talent/files/37bbe8be-678f-4a80-a316-2ddca3b261b9/voice.png'
  )
  // Mirrors of the latest `config` and `props` so the listeners registered in
  // the mount-only effect below always see current values. The previous code
  // used `[config]` as the effect dependency instead, which made the cleanup
  // (recorder/recognizer `.stop()`) run on every config change — including the
  // `setConfig` inside onTouchstart — killing a recording right after it began.
  const configRef = useRef<any>(config)
  const propsRef = useRef<any>(props)
  useEffect(() => {
    configRef.current = config
  }, [config])
  propsRef.current = props

  useEffect(() => {
    // Register recorder/recognizer callbacks once, on mount.
    const recorder = recorderManager.current
    const recognizer = manager.current

    // Recorder produced a file: push a voice-message item to the chat list.
    recorder.onStop(function (res) {
      const cfg = configRef.current
      propsRef.current.emitDataList({
        src: res.tempFilePath,
        width: cfg.width,
        seconds: cfg.shijian,
        play: false,
        type: 'video',
        status: 'ask'
      })
      propsRef.current.changeHeight(0)
    })
    // Streaming partial recognition results.
    recognizer.onRecognize = function (res) {
      console.log(res, 'onRecognizeonRecognizeonRecognize');
    };
    // Recognition actually started: give haptic feedback.
    recognizer.onStart = function (res) {
      console.log('成功开始录音识别', res);
      // 开始录音时-抖动一下手机
      Taro.vibrateShort({ type: 'medium' });
    };
    // Recognition failed: map known retcodes to user-facing toasts.
    recognizer.onError = function (res) {
      console.error('error msg', res);
      const tips = {
        '-30003': '说话时间间隔太短，无法识别语音',
        '-30004': '没有听清，请再说一次~',
        '-30011': '上个录音正在识别中，请稍后尝试',
      };
      // `retcode` may be absent, and unknown codes have no tip — guard both so
      // we never call toString() on undefined or toast an undefined title.
      const title = tips[`${res?.retcode ?? ''}`];
      if (title) {
        Taro.showToast({
          title,
          icon: 'none',
          duration: 2000,
        });
      }
    };
    // Recognition finished: warn when nothing intelligible was captured.
    recognizer.onStop = function (res) {
      console.log('..............结束录音', res);
      console.log('录音临时文件地址 -->', res.tempFilePath);
      console.log('录音总时长 -->', res.duration, 'ms');
      console.log('文件大小 --> ', res.fileSize, 'B');
      console.log('语音内容 --> ', res.result);
      if (res.result === '') {
        Taro.showModal({
          title: '提示',
          content: '没有听清，请再说一次~',
          showCancel: false,
        });
        return;
      }
    };

    // Unmount only: stop any in-flight recording/recognition.
    return () => {
      recorder.stop()
      recognizer.stop();
    }
  }, []) // listeners read live state via configRef/propsRef, so mount-once is safe

  // Finger down on the talk button: remember the press time and start both
  // the recorder and the recognizer.
  const onTouchstart = (e) => {
    setText('松开 结束')
    // Store the raw ms timestamp; the old code folded it modulo one minute,
    // which produced a negative duration when a press crossed a minute boundary.
    setConfig({
      ...config,
      start: e.timeStamp,
      luStatu: true
    })
    manager.current.start({
      duration: 30000,
      lang: 'zh_CN',
    });
    recorderManager.current.start({
      format: 'mp3'
    })
  }

  // Finger lifted: compute the press duration / bubble width and stop recording.
  const onTouchend = (e) => {
    setText('按住 说话')
    const end = e.timeStamp
    // Duration in whole seconds from raw millisecond timestamps.
    const shijian = Math.ceil((end - config.start) / 1000)
    // Short clips (1-2 s) get a fixed 100 bubble width; longer ones scale.
    const width = shijian >= 1 && shijian <= 2 ? 100 : shijian * 40
    setConfig({
      ...config,
      end: end,
      shijian: shijian,
      luStatu: false,
      width: width
    })
    manager.current.stop();
    recorderManager.current.stop()
    console.log('按了' + shijian + '秒')
    console.log('width是', width)
  }

  // Toggle between text-input mode and hold-to-talk mode.
  const imgClick = () => {
    props.changeHeight(0)
    if (flag.current) {
      setImgUrl('http://111.34.82.98:10001/talent/files/d57ef16f-31bc-47c1-8e7e-66b3d01e2f45/keyboard.png')
      flag.current = false
    } else {
      setImgUrl('http://111.34.82.98:10001/talent/files/37bbe8be-678f-4a80-a316-2ddca3b261b9/voice.png')
      flag.current = true
    }
  }
  // Send the typed text as a chat item and clear the input.
  const inputConfirm = () => {
    props.emitDataList({
      text: iptValue,
      type: 'text',
      status: 'ask'
    })
    setIptValue('')
  }
  const iptFun = (e) => {
    setIptValue(e.detail.value)
  }
  // Keyboard shown/hidden: report its height so the parent can shift the layout.
  const onFocus = (e) => {
    console.log(e.detail.height, 'onFocusonFocusonFocusonFocusonFocus')
    props.changeHeight(e.detail.height)
  }
  const onBlur = (e) => {
    console.log(e.detail.height, 'onBluronBluronBluronBlur')
    props.changeHeight(e.detail.height)
  }
  return (
    <View className={styles['bottomBtn']}>
      <Image style='width: 50rpx;height: 50rpx' src={imgUrl} onClick={imgClick} />
      {flag.current ? (
        <>
          <Input
            type='text'
            cursorSpacing={5}
            value={iptValue}
            onInput={iptFun}
            adjust-position={false}
            style={{ width: iptValue ? '73%' : '90%' }}
            onFocus={onFocus}
            onBlur={onBlur}
            className={styles['input']}
          />
          {iptValue ? (
            <Button size='mini' className={styles['sendBtn']} onClick={inputConfirm}>
              发送
            </Button>
          ) : (
            ''
          )}
        </>
      ) : (
        <Button className={styles['button']} onTouchstart={onTouchstart} onTouchend={onTouchend}>
          {text}
        </Button>
      )}
    </View>
  )
}
