<template>
  <div class="chatBox">
    <!-- Playback + live waveform section -->
    <div class="analyser">
      <audio
        :src="voicedata1"
        playsinline
        controls
        @play="
          (e) => {
            initAudioAnalyser(e.target, 'audiodom');
          }
        "
        @pause="
          () => {
            isRecording = false;
          }
        "
      ></audio>
      <!-- Canvas overlay that renders the audio waveform -->
      <canvas
        id="jump"
        :style="{ zIndex: isRecording ? '102' : '-1' }"
      ></canvas>
    </div>

    <!-- Press-to-record / release-to-stop section -->
    <div class="record">
      <audio id="audioPlayer" controls></audio>
      <button
        class="recordBtn"
        @pointerdown="debounceStartRecord"
        @pointerup="debounceEndRecord"
        @pointercancel="debounceEndRecord"
        @selectstart.prevent
      >
        按下录音抬起停止
      </button>
      <div style="height: 54px; line-height: 54px">
        录音状态：{{ isRecording ? "正在录音..." : "未录音" }}
      </div>
    </div>

    <!-- Buttons that feed base64 test data through the arraybuffer path -->
    <div class="testbuffer">
      <button @click="initAudioAnalyser(voicedata2.voice, 'arraybuffer')">
        测试pcmbase64str转buffer-进行音频可视化
      </button>
      <button @click="initAudioAnalyser(voicedata3.voice, 'arraybuffer')">
        测试base64str2转buffer-进行音频可视化
      </button>
    </div>

    <!-- Test of audio-recorder-polyfill usage (disabled; original note was truncated) -->
    <!-- <div class="testbuffer">
      <button @click="recordButton">开始录音</button>
      <button @click="stopButton">结束录音</button>
    </div> -->

    <!-- Input-focus control experiment (disabled) -->
    <!-- <input type="text" @compositionend="testInput('compositionend')" @change="testInput('change')" /> -->
    <!-- <button @click="controlFocus">控制input显示</button>
    <div class="inputBox" @click.stop v-show="isShowInput">
      <input id="inputCtx" placeholder="测试" />
    </div> -->
  </div>
</template>

<script lang="ts" setup>
import { ref, onMounted, nextTick } from "vue";
import voicedata1 from "./testdata/test_16000.wav";
import voicedata2 from "./testdata/pcmbase64str.json"; // invalid base64 audio data (exercises the decode-failure path)
import voicedata3 from "./testdata/base64str2.json"; // valid base64 audio data
import { base64ToUint8Array, bufferToAudioUrl } from "../../utils/dataTransfer";
import AudioRecorder from "audio-recorder-polyfill";
import { debounce } from "../../utils/debounce";

// ---- Shared module state for recording + visualisation ----
let isRecording = ref<boolean>(false); // true while recording or visualised playback is active
let ctx: any = null; // 2D context of the #jump canvas (set in onMounted)
let width: any = 0; // canvas width
let height: any = 0; // canvas height
let mediaStream: any = null; // cached microphone MediaStream (reused across recordings)
let mediaRecorder: any = null; // active MediaRecorder instance
let analyser: any = null; // Web Audio AnalyserNode that feeds draw()
let chunks = [] as any; // recorded audio data chunks
let dataArray: any = []; // frequency-bin buffer for the audio visualisation
let source: any = null; // current audio source node (element / stream / buffer)
const audCtx: any = new AudioContext(); // shared audio context, created at module init
const isShowInput = ref<boolean>(true); // used only by the commented-out input-focus test below

// const testInput = (type: any) => {
//   // 监听是否触发输入发完成事件
//   console.log(" 监听是否shu'ru'fa输入结束", type);
// };

/**
 * Begin capturing microphone audio with MediaRecorder.
 * Lazily acquires (and caches) the microphone MediaStream so the user is
 * only prompted once, then wires up the analyser + canvas animation for a
 * live waveform. No-op while a recording is already in progress.
 */
const startRecord = async () => {
  console.log("开始录音");
  if (isRecording.value) {
    return;
  }
  try {
    // Reuse the stream across recordings; only request the device once.
    if (mediaStream == null) {
      mediaStream = await navigator.mediaDevices.getUserMedia({
        audio: true,
      });
    }
    mediaRecorder = new MediaRecorder(mediaStream);
    mediaRecorder.ondataavailable = (evt: any) => {
      if (evt.data.size > 0) {
        chunks.push(evt.data);
      }
    };
    mediaRecorder.start();
    isRecording.value = true;
    // Drive the visualisation from the live microphone stream.
    initAudioAnalyser(mediaStream, "mediastream");
    animationLoop();
  } catch (error) {
    console.error("Error starting recording:", error);
  }
};

/**
 * Stop the active MediaRecorder and load the captured audio into the
 * #audioPlayer element as an object URL.
 *
 * Fixes:
 *  - guard against calling stop() when no recorder exists or it is already
 *    inactive (previously threw TypeError / InvalidStateError);
 *  - attach the onstop handler BEFORE calling stop(), so the handler can
 *    never race the event;
 *  - revoke the previous object URL before replacing it, to avoid leaking
 *    a Blob per recording.
 */
const endRecord = () => {
  console.log("结束录音", mediaRecorder);
  if (!mediaRecorder || mediaRecorder.state === "inactive") {
    return;
  }
  mediaRecorder.onstop = async () => {
    const audioBlob = new Blob(chunks, { type: "audio/ogg; codecs=opus" });
    chunks = [];
    const audioURL = URL.createObjectURL(audioBlob);
    const audioPlayer: any = document.getElementById("audioPlayer");
    if (audioPlayer) {
      // Release the previous recording's blob URL (no-op for non-blob src).
      if (audioPlayer.src) {
        URL.revokeObjectURL(audioPlayer.src);
      }
      audioPlayer.src = audioURL;
    }
    isRecording.value = false;
  };
  mediaRecorder.stop();
};

// Render one frame of the frequency-bar visualisation onto the canvas.
// Bars grow symmetrically outward from the horizontal centre; frames whose
// average amplitude is below `volumeThreshold` just clear the canvas.
// Fix: removed the unconditional console.log that fired on every
// requestAnimationFrame tick (flooded the console, hurt performance), and
// added a `ctx` guard to match the optional-chained fillRect calls.
const draw = () => {
  if (!analyser || !ctx) {
    return;
  }
  analyser.getByteFrequencyData(dataArray); // fill dataArray with current frequency data
  const volumeThreshold = 40; // only draw when the signal is loud enough

  // Average amplitude across all frequency bins (each bin is 0..255).
  let sum = 0;
  for (let i = 0; i < dataArray.length; i++) {
    sum += dataArray[i];
  }
  const averageAmplitude = sum / dataArray.length;

  if (averageAmplitude > volumeThreshold) {
    /*
      If dataArray stays all-zero, possible causes:
      (1) the audio is not actually playing (audCtx.state !== "running");
      (2) the source is not connected to the analyser (source.connect(analyser));
      (3) getByteFrequencyData called at the wrong time — it must be invoked
          repeatedly while audio is playing;
      (4) sample-rate mismatch (audCtx.sampleRate vs audioBuffer.sampleRate);
      (5) AnalyserNode only yields useful data after playback has run a bit;
      or the source is muted / at zero volume.
    */
    const bufferLength = dataArray.length / 2.5; // draw a subset of bins, mirrored
    const barWidth = width / bufferLength;
    // Clear the previous frame.
    ctx.clearRect(0, 0, width, height);
    ctx.fillStyle = "#000000";
    for (let i = 0; i < bufferLength; i++) {
      const data = dataArray[i]; // 0..255
      const barHeight = (data / 255) * height; // scale to canvas height
      const x1 = i * barWidth + width / 2; // bar to the right of centre
      const x2 = width / 2 - (i + 1) * barWidth; // mirrored bar to the left
      const y = (height - barHeight) / 2; // vertically centred
      ctx.fillRect(x1, y, barWidth - 4, barHeight);
      ctx.fillRect(x2, y, barWidth - 4, barHeight);
    }
  } else {
    // Too quiet: blank the canvas instead of drawing noise.
    ctx.clearRect(0, 0, width, height);
  }
};

// Drive draw() via requestAnimationFrame for as long as the recording /
// playback flag is set; the loop stops itself once the flag clears.
const animationLoop = () => {
  if (isRecording.value) {
    draw();
    requestAnimationFrame(animationLoop);
  }
};

// Audio analyser setup.
// Builds a Web Audio source + AnalyserNode and primes `dataArray` so draw()
// can visualise the signal. `type` selects how `data` is interpreted:
//   "audiodom"    - an <audio> element (MediaElementSource)
//   "mediastream" - a live microphone MediaStream
//   "arraybuffer" - a base64 string, decoded into an AudioBuffer
// NOTE(review): for "audiodom" this runs on every `play` event, but
// createMediaElementSource may only be called once per element — replaying
// the <audio> will throw InvalidStateError; the source node should be
// cached. Confirm and fix separately.
const initAudioAnalyser = async (
  data: any,
  type: "audiodom" | "mediastream" | "arraybuffer"
) => {
  // console.log(data, type);
  if (data) {
    // The source node can be created in three ways: from an <audio> element,
    // from a MediaStream, or from audio data decoded into an AudioBuffer.
    if (type == "audiodom") {
      isRecording.value = true;
      animationLoop();
      source = audCtx.createMediaElementSource(data);
    } else if (type == "mediastream") {
      source = audCtx.createMediaStreamSource(data);
    } else if (type == "arraybuffer") {
      isRecording.value = true;
      animationLoop();
      source = audCtx.createBufferSource();
      const uint8Array = base64ToUint8Array(data);
      dataArray = uint8Array;
      const arrayBuffer = uint8Array.buffer;
      /*
        Alternative approach: turn the buffer into an object URL and play it
        through the #audioPlayer <audio> element instead:
      const audioUrl = bufferToAudioUrl(arrayBuffer);
      const audioPlayer: any = document.getElementById("audioPlayer");
      audioPlayer.src = audioUrl;
      audioPlayer.play();
      source = audCtx.createMediaElementSource(audioPlayer);
      */

      if (arrayBuffer instanceof ArrayBuffer) {
        try {
          const audioBuffer = await audCtx.decodeAudioData(arrayBuffer);
          console.log("audioBuffer 解码成功:", audioBuffer);
          source.buffer = audioBuffer;
        } catch (error: any) {
          isRecording.value = false;
          console.error("解码音频数据时出错:", error);
          // Observed failure: DOMException: Failed to execute
          // 'decodeAudioData' on 'BaseAudioContext': Unable to decode audio
          // data — happens when the ArrayBuffer does not contain a valid
          // audio file format (e.g. raw PCM without a container/header).
        }
      }
    }
    analyser = audCtx.createAnalyser();
    analyser.fftSize = 256; // FFT size controls frequency resolution; common values: 128, 256, 512, 1024, 2048
    console.log(
      "Audio stream connected to AnalyserNode:",
      source.numberOfOutputs > 0
    ); // true means the node has at least one output
    // Allocate the array that receives the analyser's frequency data.
    dataArray = new Uint8Array(analyser.frequencyBinCount); // frequencyBinCount = number of available frequency bins
    source.connect(analyser);
    // analyser.connect(audCtx.destination); // route to speakers if audible playback is wanted

    if (audCtx.state === "suspended") {
      // Without a user gesture the AudioContext may stay "suspended".
      // NOTE(review): source.start() only exists on AudioBufferSourceNode,
      // so this branch would throw for the other two types; and starting
      // only while suspended looks inverted — the buffer path probably
      // needs audCtx.resume() plus an unconditional source.start(). Confirm.
      source.start(0);
      source.onended = function () {
        console.log("音频播放结束");
        isRecording.value = false;
      };
    }
    console.log(
      "initAudioAnalyser",
      audCtx.state,
      audCtx.sampleRate,
      dataArray
    );
  }
};

// Simulated audio analysis (kept for reference): fills a buffer with random
// values and draws the same mirrored-bar visualisation without an AnalyserNode.
// const simulateAudioAnalysis = () => {
//   // 清空画布
//   ctx.clearRect(0, 0, width, height);
//   const dataArray = new Uint8Array(128); // 假设我们分析了128个频率段

//   // 模拟填充数据，实际中这些值会由 AnalyserNode 提供
//   for (let i = 0; i < dataArray.length; i++) {
//     // 模拟一个简单的正弦波形，其中频率较低的振幅较高，高频部分振幅较低
//     // dataArray[i] = Math.sin((i * Math.PI * 2) / dataArray.length) * 127 + 128
//     dataArray[i] = Math.floor(Math.random() * 256);
//   }
//   console.log("模拟音乐节点", dataArray, width, height);
//   const bufferLength = dataArray.length / 2.5; //一般两半波幅
//   const barWidth = width / bufferLength;
//   // const barWidth = width / (bufferLength * 2) //对称增加一半数据，可能看不清
//   // const barWidth = 2  //自定义
//   // console.log(barWidth)
//   ctx.fillStyle = "#000000";
//   for (let i = 0; i < bufferLength; i++) {
//     const data = dataArray[i]; //<256
//     const barHeight = (data / 255) * height; // 乘以height放大波幅
//     // const x = i * barWidth
//     const x1 = i * barWidth + width / 2;
//     const x2 = width / 2 - (i + 1) * barWidth;
//     // const y = height - barHeight //底部对齐
//     const y = (height - barHeight) / 2; //中心对其
//     // ctx?.fillRect(x, y, barWidth - 3, barHeight)
//     ctx?.fillRect(x1, y, barWidth - 4, barHeight);
//     ctx?.fillRect(x2, y, barWidth - 4, barHeight);
//   }
//   draw();
// };
// Recorder instance from audio-recorder-polyfill (null until first use).
let recorder: any = null;

/**
 * Start recording through audio-recorder-polyfill and hook up the live
 * waveform visualisation.
 * Fix: a rejected getUserMedia (permission denied / no input device)
 * previously produced an unhandled promise rejection; it is now caught
 * and logged.
 */
const recordButton = () => {
  navigator.mediaDevices
    .getUserMedia({ audio: true })
    .then((stream) => {
      recorder = new AudioRecorder(stream);
      recorder.start();
      isRecording.value = true;
      // Visualise the live microphone stream.
      initAudioAnalyser(stream, "mediastream");
      animationLoop();
    })
    .catch((error) => {
      console.error("Error starting recording:", error);
    });
};
/**
 * Stop the polyfill recorder and release the microphone.
 * Fixes:
 *  - guard against stop() when nothing was ever started (recorder is null,
 *    previously a TypeError);
 *  - register the "dataavailable" listener BEFORE calling stop(), so the
 *    event emitted by stop() cannot be missed.
 */
const stopButton = () => {
  if (!recorder) {
    return;
  }
  recorder.addEventListener("dataavailable", (e: any) => {
    isRecording.value = false;
    console.log("dataavailable", e);
  });
  recorder.stop();
  // Release the microphone so the browser's recording indicator goes away.
  recorder.stream.getTracks().forEach((track: any) => track.stop());
};
// Debounced pointer handlers (500 ms window): collapse rapid press/release
// sequences from the record button into a single start/stop call each.
const debounceStartRecord = debounce(() => recordButton(), 500);
const debounceEndRecord = debounce(() => stopButton(), 500);

onMounted(() => {
  // Grab the visualisation canvas and cache its 2D context and backing size.
  const canvas: any = document.getElementById("jump");
  ctx = canvas.getContext("2d");
  // NOTE(review): width/height here are the canvas' intrinsic bitmap
  // dimensions (HTML default 300x150), not its CSS size (#jump is styled
  // 80% x 100px) — the rendered bars are therefore scaled/stretched by the
  // browser; confirm this is intended.
  width = canvas.width;
  height = canvas.height;

  // simulateAudioAnalysis();
});

// const controlFocus = () => {
//   isShowInput.value = !isShowInput.value;
//   if (isShowInput.value) {
//     nextTick(() => {
//       const inptCtxDom: any = document.getElementById("inputCtx");
//       inptCtxDom.focus();
//     });
//   } else {
//     nextTick(() => {
//       const inptCtxDom: any = document.getElementById("inputCtx");
//       inptCtxDom.blur();
//     });
//   }
// };
</script>

<style scoped lang="scss">
/* Full-viewport container for the demo page. */
.chatBox {
  height: 100vh;
  width: 100vw;
  position: relative;
  display: block;
  overflow: hidden;

  /* Each demo section is a bordered horizontal row. */
  > div {
    margin: 10px;
    border: 1px solid #000;
    display: flex;
    justify-content: flex-start;
    align-content: center;
  }

  button,
  audio {
    margin-right: 10px;
  }
}

/* Waveform canvas. Note: this CSS size differs from the canvas' bitmap
   size read in onMounted, so the drawing is scaled by the browser. */
#jump {
  width: 80%;
  height: 100px;
}
</style>
<!-- stray paste of an import path (already imported in the script block); kept as a comment so the SFC stays valid: ../../utils/dataTransfer -->