<template>
  <div>
    <!-- showAnima state machine: null = idle, true = recording, false = stopped -->
    <el-button
      v-show="data.showAnima == null"
      style="color: green"
      @click="voice"
      >开始录音</el-button
    >
    <el-button
      v-show="data.showAnima !== null"
      style="color: red"
      @click="handleStop"
      >停止录音</el-button
    >
    <el-button
      v-show="data.showAnima !== null"
      style="color: gray"
      @click="clearVoice"
      >清空录音</el-button
    >
    <!-- <div>{{ data.duration }}</div> -->
    <!-- Strict comparisons: showAnima is a tri-state (null/true/false). -->
    <div v-if="data.showAnima === true" style="color: green">
      正在录音...{{ data.duration }}
    </div>
    <div v-else-if="data.showAnima === false" style="color: red">
      已停止录音-录音时长{{ data.duration }}
    </div>
    <div v-else></div>
    <!-- <el-button @click="handlePlay">播放录音</el-button> -->
  </div>
</template>

<script setup>
import {
  getCurrentInstance,
  nextTick,
  onMounted,
  onUnmounted,
  reactive,
  ref,
  watch,
} from "vue-demi";
import Recorder from "js-audio-recorder";
// import Recorder from 'recorder-js';
import LZString from "lz-string";
import { ElMessage, ElMessageBox, ElNotification } from "element-plus";
import axios from "@/utils/request";
import dayjs from "dayjs";
import { Base64 } from "js-base64";
// NOTE(review): getCurrentInstance / ref / watch are used but not imported at
// the top of the file — presumably provided by unplugin-auto-import; confirm,
// or import them explicitly from "vue-demi".
const { proxy } = getCurrentInstance();
// Handle of the 1-second interval that drives the recording-duration display.
let timer = ref(null);
const props = defineProps({
  // Device/terminal numbers to broadcast to (joined with "|" into the form).
  shebei: {
    type: Array,
    default: () => [],
  },
  // Requested number of broadcast repetitions.
  num: {
    type: Number,
    default: 0,
  },
});

// NOTE(review): submit() also emits "refreshData", which is not declared
// here — confirm whether it should be added to the list.
let emit = defineEmits(["guanbi"]);

// Central reactive state for the recorder widget.
let data = reactive({
  // Holds the Recorder instance (null until recording starts).
  recorder: null,
  formData: null,
  // Controls the recording animation: null = idle, true = recording, false = stopped.
  showAnima: null,
  mation: true,
  isHistory: true,
  // Formatted recording duration shown in the UI.
  duration: "00:00:01",
  // Elapsed milliseconds backing `duration` (starts at 1 second).
  initialMs: 1000,
  form: {
    TerminalNO: "9300", // broadcast numbers, multiple joined with "|", e.g. 6001|6002|6003
    Frequency: 3, // number of times to play the broadcast
    webfile: {
      FileType: 1, // file type: 0 = mp3, 1 = wav
      // FileSize:fileOfBlob.size, // must not exceed 10*102481024 bytes
      FileSize: 1024, // must not exceed 10*102481024 bytes
      // FileName:fileOfBlob.name // file name
      FileName: "录音", // file name ("recording")
      FileDatastr: "", // audio stream as a base64-encoded string
    },
    // FileDatastr:fileOf64 // audio stream, base64-encoded string
  },
  boyinUrl: "/callingCenter/broadcast", // broadcast endpoint
  yinpinAddUrl: "/sip/config/file/add", // audio-library "add" endpoint
});
// Keep the form's terminal list in sync with the selected devices.
watch(
  () => props.shebei,
  (newValue) => {
    nextTick(() => {
      // NOTE(review): nextTick looks unnecessary for a plain assignment — confirm.
      data.form.TerminalNO = newValue.join("|");
    });
  }
);
// Mirror the requested play count into the form.
watch(
  () => props.num,
  (newValue) => {
    nextTick(() => {
      data.form.Frequency = newValue;
    });
  }
);
/**
 * Return the portion of `str` that follows the first occurrence of
 * `character`; returns "" when the delimiter is absent.
 */
function getStringAfterCharacter(str, character) {
  const [, ...rest] = str.split(character);
  return rest.length > 0 ? rest.join(character) : "";
}
/**
 * Read a Blob and resolve with its data-URL (base64) representation.
 * Rejects with an Error when the FileReader fails.
 */
function blobToBase64(blob) {
  console.log(blob, "blob");
  return new Promise((resolve, reject) => {
    const reader = new FileReader();
    reader.onload = (event) => resolve(event.target.result);
    reader.onerror = () => reject(new Error("blobToBase64 error"));
    reader.readAsDataURL(blob);
  });
}
/**
 * Read `blob` as a data URL, LZ-compress the base64 string, and stash the
 * result in data.form.webfile.FileDatastr.
 *
 * Fixes: removed the pointless `.bind(this)` — in <script setup> module scope
 * `this` is undefined, so the bind was a no-op; added an onerror handler so a
 * failed read is no longer silently ignored.
 */
function convertBlobToBase64(blob) {
  const reader = new FileReader();
  reader.onload = (event) => {
    const base64 = event.target.result;
    // Compress before storing to keep the payload small.
    const compressedBase64 = compressBase64(base64);
    data.form.webfile.FileDatastr = compressedBase64;
    console.log("压缩后的Base64:", compressedBase64);
  };
  reader.onerror = () => {
    console.error("convertBlobToBase64: failed to read blob");
  };
  reader.readAsDataURL(blob);
}
/** LZ-compress a base64 string via lz-string's LZString.compress. */
function compressBase64(base64) {
  return LZString.compress(base64);
}
function base64ToFile(base64Data, fileName) {
  // 分离MIME类型和纯Base64数据
  const arr = base64Data.split(",");
  const mime = arr[0].match(/:(.*?);/)[1];
  const bstr = atob(arr[1]);
  const safeFileName = fileName.includes(".")
    ? fileName
    : `${fileName}.${mime.split("/")[1]}`;
  // 转换为Uint8Array
  const uint8Array = new Uint8Array(bstr.length);
  for (let i = 0; i < bstr.length; i++) {
    uint8Array[i] = bstr.charCodeAt(i);
  }

  // 创建Blob并转为File对象
  const blob = new Blob([uint8Array], { type: mime });
  return new File([blob], safeFileName, { type: mime });
}

/**
 * Finalize and send the recording:
 *  1. pause the recorder and reset the duration UI,
 *  2. export the WAV blob and base64-encode it, rebuild it as a File,
 *  3. upload via /common/uploadUUID, register it in the audio library,
 *  4. trigger the broadcast, then emit "refreshData" and destroy the recorder.
 *
 * NOTE(review): the inner .then() chains have no .catch(); a non-200 upload
 * response is silently ignored — confirm whether those failures should
 * surface to the user.
 */
function submit() {
  if (data.recorder !== null) {
    // Reset the UI to the idle state before sending.
    data.showAnima = null;
    data.duration = "00:00:01";
    clearInterval(timer.value);
    data.initialMs = 1000; // reset the recorded-duration counter
    // Voice-sending flow starts here.
    data.recorder?.pause(); // pause (not stop) so the buffer stays readable
    var formData = new FormData();
    var blob = data.recorder?.getWAVBlob(); // get WAV-format audio data
    // console.log(blob, "blob");
    data.form.webfile.FileSize = blob?.size;
    // var blob = data.recorder.getWAV();
    // After getting the blob we set a fileName to satisfy this project's
    // upload requirements; other projects can put the blob into formData directly.
    var newbolb = new Blob([blob], { type: "audio/wav" });
    // var fileOfBlob = new File([newbolb], new Date().getTime() + '.wav')
    var fileOfBlob = new File([newbolb], "录音.wav");
    // formData.append(blob)
    formData.append("Webfile", fileOfBlob);
    // formData is the object sent to the backend.
    // convertBlobToBase64(blob)
    blobToBase64(blob).then((res) => {
      console.log(res, "res969");
      // var val = getStringAfterCharacter(res, "base64,");
      let value = base64ToFile(res, "录音");
      // data.form.webfile.FileDatastr = val;
      // NOTE(review): this shadows the outer `formData`; the outer one
      // (holding "Webfile") is never sent — confirm it is dead code.
      const formData = new FormData();
      formData.append("file", value);
      // Upload the recording file.
      axios
        .post("/common/uploadUUID", formData, {
          headers: {
            "Content-Type": "multipart/form-data", // required for multipart upload
          },
        })
        .then((response) => {
          if (response.code == 200) {
            let newuuid = response.newFileName.split(".")[0];

            // Register the file via the audio-library "add" endpoint.
            axios
              .post(data.yinpinAddUrl, {
                uuid: newuuid,
                fileName: response.originalFilename,
                originalName: response.originalFilename,
                originalPath: response.fileName,
                convertedPath: response.convertedPath,
                fileSuffix: response.fileSuffix,
                duration: response.duration,
                url: response.url,
                type: 1, // 0 = music library, 1 = recording, 2 = tts
              })
              .then((res) => {
                if (res.code == 200) {
                  // Broadcast/play the uploaded audio.
                  axios
                    .post(data.boyinUrl, {
                      guestNumbers: props.shebei,
                      fileUuid: [newuuid],
                      loopType: 0, // 0 = by count, 1 = by duration
                      loopCount: props.num,
                    })
                    .then((res) => {
                      if (res.code == 200) {
                        emit("refreshData");
                        ElMessage.success(res.msg);
                        handleDestroy();
                      } else {
                        ElMessage.error(res.msg);
                      }
                    });
                } else {
                  ElMessage.error(res.msg);
                }
              });
          }
        })
        .catch((error) => {
          console.error("上传失败", error);
        });

      // axios.post("/Service/GetWebApiSpeakVoice", data.form).then((res) => {
      //   if (res.Result == true) {
      //     ElMessage.success(res.Msg);
      //     (data.form.webfile = {
      //       FileType: 1, // file type: 0 = mp3, 1 = wav
      //       // FileSize:fileOfBlob.size, // must not exceed 10*102481024 bytes
      //       FileSize: 1024, // must not exceed 10*102481024 bytes
      //       // FileName:fileOfBlob.name // file name
      //       FileName: "录音", // file name
      //       FileDatastr: "", // audio stream, base64-encoded string
      //     }),
      //     emit("refreshData");
      //       handleDestroy();
      //   } else {
      //     ElMessage.error(res.Msg);
      //   }
      // });
    });
  }
}
// 录音按钮的点击事件
/**
 * "Start recording" click handler.
 * Creates a Recorder (standard WAV: 8 kHz, 16-bit, mono), starts a 1-second
 * interval that advances the displayed duration, and begins capturing audio.
 */
function voice() {
  // Must produce a standard WAV file: 8 kHz sample rate, 16-bit, one channel.
  data.recorder = new Recorder({
    sampleBits: 16, // sample size: 8 or 16 (default 16)
    sampleRate: 8000, // sample rate; browser default varies (Chrome: 48000)
    numChannels: 1, // channel count: 1 or 2 (default 1)
  });
  // Tick once per second to keep the on-screen duration current.
  timer.value = setInterval(() => {
    data.initialMs += 1000; // advance by one second
    data.duration = msToTime(data.initialMs);
  }, 1000);
  data.showAnima = true;
  data.recorder.start(); // begin recording
  console.log(data.recorder, "开始录音");
}
/**
 * "Stop recording" click handler: cancel the duration timer and pause the
 * recorder (pause, not stop, so the buffer stays available for submit()).
 */
function handleStop() {
  console.log(data.recorder, "停止录音");
  clearInterval(timer.value);
  data.recorder.pause();
  data.showAnima = false;
  data.mation = false;
}
/**
 * "Clear recording" click handler: cancel the timer, drop the recorder
 * instance, and reset the duration display to its initial value.
 */
function clearVoice() {
  clearInterval(timer.value);
  data.recorder = null;
  data.showAnima = null;
  data.duration = "00:00:01";
  data.initialMs = 1000; // reset the duration counter
}
/** Play back the current recording (the triggering button is commented out). */
function handlePlay() {
  console.log("播放录音");
  data.recorder.play();
}
/** Tear down the Recorder instance, releasing its audio resources. */
function handleDestroy() {
  console.log("销毁实例");
  data.recorder?.destroy();
}

/**
 * Format a millisecond count as "HH:MM:SS" (hours are not capped at 24).
 *
 * @param {number} ms - elapsed time in milliseconds (assumed non-negative)
 * @returns {string} zero-padded "HH:MM:SS"
 */
function msToTime(ms) {
  const hours = Math.floor(ms / 3600000);
  const minutes = Math.floor((ms % 3600000) / 60000);
  const seconds = Math.floor((ms % 60000) / 1000);

  // String#padStart replaces the manual `"0" + n` ternaries.
  const pad = (n) => String(n).padStart(2, "0");
  return `${pad(hours)}:${pad(minutes)}:${pad(seconds)}`;
}
// Expose the timer handle, submit() and the reactive state to the parent component.
defineExpose({ timer, submit, data });
// No mount-time work currently needed; kept as a placeholder.
onMounted(() => {});
</script>

<style>
</style>