<template>
  <!-- Speech service page -->
  <div style="width: 100%; background: aliceblue; text-align: left">
    <div class="topIcon" @click="logout">
      <img src="../../assets/images/home/tuichu.png" alt="退出" />
    </div>
    <!-- Speech recognition -->
    <div>
      <!-- Hidden helper frame hosting the real-time recognition page;
           it talks to this component via postMessage (see mounted()). -->
      <iframe
        ref="myiframe"
        name="myiframe"
        class="iframe"
        id="myiframe"
        title="real-time speech recognition helper"
        aria-hidden="true"
        style="width: 0; height: 0"
        src="/speechService/examples/index.html"
        frameborder="0"
        scrolling="no"
      ></iframe>
      <div class="SpeechRecognition">
        <div class="SRcondition">
          类型
          <el-select v-model="IdentifyTypeValue" placeholder="请选择" @change="identifyTypeValueChange()">
            <el-option
              v-for="item in IdentifyType"
              :key="item.value"
              :label="item.label"
              :value="item.value"
            >
            </el-option>
          </el-select>
          <el-button type="primary" style="margin-left: 10px" @click="start">{{
            IdentifyTypeValue == "OneSentence" ? "开始录音" : "开始"
          }}</el-button>
          <el-button type="warning" @click="end">{{
            IdentifyTypeValue == "OneSentence" ? "停止录音" : "停止"
          }}</el-button>
          <el-button
            type="primary"
            v-show="IdentifyTypeValue == 'OneSentence'"
            @click="changeText()"
          >转文字</el-button>
          <audio
            class="OneSentenceAudio"
            style="vertical-align: middle"
            :src="audioUrl"
            controls
            v-show="IdentifyTypeValue == 'OneSentence'"
          ></audio>
        </div>
        <el-input
          disabled
          class="inputBox"
          v-model="textarea1"
          type="textarea"
          rows="10"
          cols="115"
        ></el-input>
      </div>
    </div>
    <!-- Conversational bot -->
    <div class="ConversationalBots">
      <el-row :gutter="20">
        <el-col :span="11">
          <div>
            <el-input
              class="inputBox"
              placeholder="请输入对话内容"
              v-model="ConverBotsInput"
              type="textarea"
              :clearable="true"
              rows="10"
              style="width: 100%"
            ></el-input>
          </div>
        </el-col>
        <el-col :span="2">
          <div>
            <el-button
              type="primary"
              plain
              style="width: 100%"
              @click="ConverBots()"
              >对话</el-button
            >
          </div>
        </el-col>
        <el-col :span="11">
          <div>
            <el-input
              class="inputBox"
              v-model="ConverBotsOutput"
              rows="10"
              style="width: 100%"
              type="textarea"
            ></el-input>
          </div>
        </el-col>
      </el-row>
    </div>
    <!-- Speech synthesis -->
    <div class="SpeechSynthesis">
      <el-form label-position="left" label-width="100px">
        <el-form-item label="请输入文本">
          <el-input
            size="medium"
            placeholder="请输入要合成语音的文本"
            class="inputBox"
            cols="100"
            rows="3"
            maxlength="100"
            show-word-limit
            type="textarea"
            v-model="SpeechSynthesisValue"
          ></el-input>
        </el-form-item>
        <el-form-item label="url" v-show="SpeechSynthesisUrl">
          <el-link type="success">{{ SpeechSynthesisUrl }}</el-link>
        </el-form-item>
        <el-form-item label="操作">
          <el-button type="primary" @click="postSpeechSynthesis"
            >运行</el-button
          >
          <el-button type="primary" @click="playSpeechSynthesis"
            >播放</el-button
          >
        </el-form-item>
      </el-form>
    </div>
  </div>
</template>

<script>
import { trtcSpeech, recognise,conversate  } from "../../api/index";//
import Recorder from "js-audio-recorder";
export default {
  name: "speechService",
  data() {
    return {
      messageVoice: null, //消息的语音
      recorder: null, // 一句话识别的录音
      textarea1: "",
      textarea2: "",
      andioSrc: "",
      IdentifyType: [
        {
          value: "RealTime",
          label: "实时识别",
        },
        {
          value: "OneSentence",
          label: "一句话识别",
        },
      ],
      IdentifyTypeValue: "RealTime",
      SpeechSynthesisUrl: "",
      SpeechSynthesisValue: "",
      ConverBotsInput: "",
      ConverBotsOutput: "",
      audioUrl:""
    };
  },
  mounted() {
    this.messageVoice = new Audio();
    this.recorder = new Recorder();
    // 收消息
    var This = this;
    window.addEventListener("message", (e) => {
      switch (e.data.cmd) {
        case "change":
          This.textarea1 = e.data.params.currentText;
          break;
      }
    });
  },
  methods: {
    identifyTypeValueChange(){
      this.textarea1=""
      if(this.IdentifyTypeValue == 'RealTime'){ //切换到实时识别，一句话识别复原
        this.audioUrl=""
        this.recorder.stop(); // 暂停录音
      }
      if(this.IdentifyTypeValue == 'OneSentence'){ //切换到一句话识别，实时识别复原
        document.getElementById("myiframe").contentWindow.postMessage(
          {
            cmd: "end",
          },
          "*"
        );
      }
    },
    logout() {
      // this.recorder.destroy(); // 毁实例
      this.$router.push("/");
    },
    start() {
      if (this.IdentifyTypeValue == "RealTime") {//实时识别
        // 发消息
        this.textarea1 = "";
        document.getElementById("myiframe").contentWindow.postMessage(
          {
            cmd: "start",
          },
          "*"
        );
      } else if (this.IdentifyTypeValue == "OneSentence") {//一句话识别
        Recorder.getPermission().then(
          () => {
            console.log("开始录音");
            this.recorder.start(); // 开始录音
          },
          (error) => {
            this.$message.error("请先允许该网页使用麦克风");
            console.log(`${error.name} : ${error.message}`);
          }
        );
      }
    },
    end() {
      if (this.IdentifyTypeValue == "RealTime") {
        //实时识别
        document.getElementById("myiframe").contentWindow.postMessage(
          {
            cmd: "end",
          },
          "*"
        );
        this.$message.info("实时识别已停止");
      } else if (this.IdentifyTypeValue == "OneSentence") {
        this.recorder.stop(); // 暂停录音
        //this.recorder.getWAVBlob() 获取录音数据的方法获取的blob对象
        // 将blob转换为url
        this.audioUrl = URL.createObjectURL(this.recorder.getWAVBlob());
      }
    },
    async changeText(){
      const formData = new FormData()
      const blob = this.recorder.getWAVBlob()// 获取wav格式音频数据
      // this.audioUrl = URL.createObjectURL(blob);
      // 此处获取到blob对象后需要设置fileName满足当前项目上传需求，其它项目可直接传把blob作为file塞入formData
      const newbolb = new Blob([blob], { type: 'audio/wav' })
      const fileOfBlob = new File([newbolb], new Date().getTime() + '.wav')
      formData.append('voice', fileOfBlob)
      await recognise(formData)
        .then((res) => {
          if (res.data.code == 0) {
            this.textarea1 = res.data.result
            if(!res.data.result){
                this.$message.info("一句话识别结果为空");
            }
          } else {
            this.$message.error("一句话识别失败");
          }
        })
        .catch((error) => {
          console.log("一句话识别失败", error);
          this.$message.error("一句话识别失败");
        });
    }, //一句话识别：语音转文字
    play() {
      if(this.recorder){
        this.recorder.play(); // 播放录音
      } else{
        this.$message.info("请先开始录音");
      }
    },
    // Pause playback of the current recording.
    playpause(){
      this.recorder.pausePlay()
    },
    // 语音合成
    async postSpeechSynthesis() {
      let This = this;
      // This.SpeechSynthesisUrl = ""
      // This.messageVoice.src = ""
      await trtcSpeech("message=" + this.SpeechSynthesisValue)
        .then((res) => {
          console.log("调用接口的结果", res);
          if (res.data.code == 0) {
            This.SpeechSynthesisUrl = res.data.result;
            This.messageVoice.src = res.data.result;
            this.$message.success("语音合成成功");
          } else {
            this.$message.error("语音合成失败");
          }
        })
        .catch((error) => {
          console.log("获取失败", error);
          this.$message.error("语音合成失败");
        });
    },
    playSpeechSynthesis() {
      this.messageVoice.play();
      // this.speak1({text:this.SpeechSynthesisValue})
    },


    // speak1({ text, speechRate, lang, volume, pitch }) {
    //         console.log(window.SpeechSynthesisUtterance,'window.SpeechSynthesisUtterance')
    //         if (!window.SpeechSynthesisUtterance) {
    //             console.log('当前浏览器不支持文字转语音服务')
    //             return;
    //         }

    //         if (!text) {
    //             return;
    //         }
                                        
    //         const speechUtterance = new SpeechSynthesisUtterance();
    //         speechUtterance.text = text;
    //         speechUtterance.rate = speechRate || 1;
    //         speechUtterance.lang = lang || 'zh-CN';
    //         speechUtterance.volume = volume || 1;
    //         speechUtterance.pitch = pitch || 1;

    //         var voices = window.speechSynthesis.getVoices()
    //         // 设置实例的voice
    //         speechUtterance.voice = voices.filter(function(voice) { return voice.localService == true && voice.lang == "zh-CN"; })[0];


    //         // 语音合成出错
    //         speechUtterance.addEventListener('error', (event) => {
    //             // 朗读错误时触发
    //             console.error('朗读错误:', event, event.error);
    //         });
    //         // 实例属性，语音合成结束
    //         speechUtterance.onend = function() {
    //             // endEvent && endEvent();
    //             console.log('朗读结束');
    //         };
            
    //         // 实例属性，语音合成开始
    //         speechUtterance.onstart = function() {
    //             // startEvent && startEvent();
    //             console.log('朗读开始');
    //         };
    //         speechUtterance.onerror= function() {
    //             // startEvent && startEvent();
    //             console.log('朗读错误');
    //         };
    //         //在语音合成过程中达到特定边界时触发的事件处理程序。
    //         speechUtterance.onboundary= () => {
    //             console.log('边界');
    //         }
    //         speechSynthesis.speak(speechUtterance);
    //         console.log('speechUtterance',speechUtterance)
    //         return speechUtterance;
    // },


    async ConverBots() {
    // await conversate("message=" + this.ConverBotsInput)
    //  .then((res) => {
    //       console.log("调用接口的结果", res);
    //       if (res.data.code == 0) {
    //         this.ConverBotsOutput = res.data.result;
    //       } else {
    //         this.$message.error("自然语言闲聊失败");
    //       }
    //     })
    //     .catch((error) => {
    //       console.log("获取失败", error);
    //       this.$message.error("自然语言闲聊失败");
    //     });
      await conversate(this.ConverBotsInput)
        .then((res) => {
          console.log("调用接口的结果", res);
          if (res.data.code == 200) {
            this.ConverBotsOutput = res.data.data;
          } else {
            this.$message.error("自然语言闲聊失败");
          }
        })
        .catch((error) => {
          console.log("获取失败", error);
          this.$message.error("自然语言闲聊失败");
        });
    },
  },
  watch:{
    'recorder.duration': {
		handler(val) {
			if(val >= 30){ //文档最大60秒
        this.end()
        this.$message.info("录音已停止");
      }
		},
	}
  }
};
</script>
<style scoped>
/* Logout icon pinned to the top-right corner of the page. */
.topIcon,
.topIcon img {
  width: 3.5rem;
  height: 3.5rem;
  margin-right: 2rem;
  position: absolute;
  right: 0;
  top: 1rem;
  cursor: pointer;
  z-index: 1;
}
/* Let Element UI inputs size to their content instead of full width. */
.el-input,
.el-textarea {
  width: unset;
}
/* Speech-recognition section spacing. */
.SpeechRecognition {
  margin: 40px 20px 20px 20px;
}
/* Speech-synthesis section spacing. */
.SpeechSynthesis {
  margin: 20px;
}
/* Chat-bot section, separated from its neighbors by rules. */
.ConversationalBots {
  border-top: 1px solid gainsboro;
  border-bottom: 1px solid gainsboro;
  padding: 20px;
}
/* Row holding the recognition-type controls. */
.SRcondition {
  margin-bottom: 20px;
}
/* Playback element for the one-sentence recording. */
.OneSentenceAudio{
  margin-left: 10px;
}
</style>
