<template>
  <!-- Bottom-docked voice-intercom panel: start/stop controls, live ASR text, waveform -->
  <div class="mainContent">
    <div class="shiftText">
      <el-icon size="60" color="#231815">
        <Back />
      </el-icon>
      <div class="stxt">转文字</div>
    </div>
    <!-- Start / stop streaming microphone audio to the ASR websocket -->
    <button @click="startIntercom" class="voiceBtn">开始对讲</button>
    <button @click="endIntercom" class="voiceBtn">关闭对讲</button>
    <div style="margin-top: 30px; font-size: 30px">
      语音识别的文字为：{{ resText || "--" }}
    </div>
    <!-- Live audio waveform drawn by updateWaveform() -->
    <canvas ref="waveformCanvas" width="500" height="200"></canvas>
  </div>
</template>

<script setup>
import { ref, onMounted } from "vue";

const resText = ref(""); // recognized text returned by the ASR websocket
/*
 * The websocket peer requires an `is_speaking` status frame before the first
 * audio packet and another one after the last packet; adjust these payloads
 * to whatever the actual backend protocol expects.
 */
const signalBase = { mode: "2pass", wav_name: "h5" };

// Status frame sent right before audio streaming starts.
const startData = { is_speaking: true, ...signalBase };

// Status frame sent right after audio streaming stops.
const endData = { is_speaking: false, ...signalBase };

const ws = ref(null); // active WebSocket connection to the ASR server
const record = ref(null); // Recorder instance wrapping the microphone stream
const waveformCanvas = ref(null); // <canvas> element used for the waveform

// Remember the freshly created Recorder so websocket callbacks can drive it.
const init = (rec) => {
  record.value = rec;
};
/*
 * Recorder: wraps a microphone MediaStream, buffers raw Float32 PCM samples,
 * downsamples them to 16 kHz / 16-bit, and streams the result to the ASR
 * websocket in 1024-byte packets. Use with `new Recorder(stream)`.
 *
 * Structural fix: the instance methods (start/stop/getBlob/clear) and
 * sendData previously sat at module top level, where `this` is undefined in
 * an ES module (module evaluation threw a TypeError) and
 * `audioData`/`processor` were out of scope. They now live inside the
 * constructor closure where they belong.
 */
const Recorder = function (stream) {
  const sampleBits = 16; // output sample size: 8 or 16 bits
  const sampleRate = 16000; // output sample rate (Hz) the ASR server expects
  const context = new (window.AudioContext || window.webkitAudioContext)();
  const audioInput = context.createMediaStreamSource(stream);
  let processor; // AudioWorkletNode or ScriptProcessorNode, assigned below
  let recording = true; // gates sendData; set by start(), cleared by stop()

  if (context.audioWorklet) {
    // Modern path: AudioWorklet ("audio-processor.js" is expected to post
    // Float32Array chunks through its port). addModule is async, so
    // `processor` stays undefined until the module has loaded.
    context.audioWorklet
      .addModule("audio-processor.js")
      .then(() => {
        processor = new AudioWorkletNode(context, "audio-processor");
        audioInput.connect(processor);
        processor.connect(context.destination);
      })
      .catch((err) => {
        console.error("AudioWorklet 加载失败:", err);
      });
  } else {
    console.warn(
      "createScriptProcessor is deprecated, consider using AudioWorklet"
    );
    // Fallback for older browsers: the deprecated ScriptProcessorNode.
    processor = context.createScriptProcessor(4096, 1, 1);
    audioInput.connect(processor);
    processor.connect(context.destination);
  }

  // Accumulates Float32 samples and converts them to 16 kHz / 16-bit PCM.
  const audioData = {
    type: "wav",
    size: 0, // total number of buffered samples
    buffer: [], // captured Float32Array chunks
    // Actual capture rate of the audio context (was hard-coded to 48000,
    // which silently produced the wrong pitch on e.g. 44100 Hz devices).
    inputSampleRate: context.sampleRate,
    inputSampleBits: 16, // input sample size: 8 or 16 bits
    outputSampleRate: sampleRate, // output sample rate
    outputSampleBits: sampleBits, // output sample size (typo "oututSampleBits" fixed)
    clear: function () {
      this.buffer = [];
      this.size = 0;
    },
    input: function (data) {
      this.buffer.push(new Float32Array(data));
      this.size += data.length;
    },
    // Merge all buffered chunks, then decimate to the output sample rate.
    compress: function () {
      const data = new Float32Array(this.size);
      let offset = 0;
      for (let i = 0; i < this.buffer.length; i++) {
        data.set(this.buffer[i], offset);
        offset += this.buffer[i].length;
      }
      // Keep every `compression`-th sample (naive decimation, no filtering).
      const compression = Math.floor(
        this.inputSampleRate / this.outputSampleRate
      );
      // Math.floor is required: a fractional length makes the Float32Array
      // constructor throw a RangeError when size % compression !== 0.
      const length = Math.floor(data.length / compression);
      const result = new Float32Array(length);
      let index = 0;
      let j = 0;
      while (index < length) {
        result[index] = data[j];
        j += compression;
        index++;
      }
      return result;
    },
    // Encode the decimated samples as little-endian signed 16-bit PCM.
    encodePCM: function () {
      const bits = Math.min(this.inputSampleBits, this.outputSampleBits);
      const bytes = this.compress();
      const dataLength = bytes.length * (bits / 8);
      const buffer = new ArrayBuffer(dataLength);
      const data = new DataView(buffer);
      let offset = 0;
      for (let i = 0; i < bytes.length; i++, offset += 2) {
        // Clamp to [-1, 1] and scale to the signed 16-bit range.
        const s = Math.max(-1, Math.min(1, bytes[i]));
        data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7fff, true);
      }
      return new Blob([data]);
    },
  };

  // Encode buffered audio to PCM and push it to the websocket in 1024-byte
  // packets (the final packet may be shorter). Already-sent samples are
  // cleared so they are not re-transmitted.
  const sendData = function () {
    if (!recording) return; // recording stopped: drop the data
    const reader = new FileReader();
    reader.onload = (e) => {
      const outbuffer = e.target.result;
      const arr = new Int8Array(outbuffer);
      if (arr.length > 0) {
        let tmparr = new Int8Array(1024);
        let j = 0;
        for (let i = 0; i < arr.byteLength; i++) {
          tmparr[j++] = arr[i];
          if ((i + 1) % 1024 === 0) {
            ws.value.send(tmparr);
            if (arr.byteLength - i - 1 >= 1024) {
              tmparr = new Int8Array(1024);
            } else {
              // Last, shorter packet.
              tmparr = new Int8Array(arr.byteLength - i - 1);
            }
            j = 0;
          }
          if (i + 1 === arr.byteLength && (i + 1) % 1024 !== 0) {
            ws.value.send(tmparr);
          }
        }
      }
    };
    reader.readAsArrayBuffer(audioData.encodePCM());
    audioData.clear();
  };

  // Begin feeding captured audio into audioData, the websocket, and the
  // waveform display.
  this.start = function () {
    recording = true; // re-arm sendData after a previous stop()
    const onChunk = (inputBuffer) => {
      audioData.input(inputBuffer);
      sendData();
      updateWaveform(inputBuffer);
    };
    if (processor instanceof AudioWorkletNode) {
      processor.port.onmessage = (e) => onChunk(e.data);
    } else if (processor) {
      processor.onaudioprocess = (e) =>
        onChunk(e.inputBuffer.getChannelData(0));
    } else {
      // AudioWorklet module is still loading; nothing to wire up yet.
      console.warn("audio processor not ready yet");
    }
  };

  // Stop streaming; already-buffered data stays available via getBlob().
  this.stop = function () {
    recording = false;
    if (processor instanceof AudioWorkletNode) {
      processor.port.close();
    } else if (processor) {
      processor.disconnect();
    }
  };

  // Current buffered audio as a PCM Blob.
  this.getBlob = function () {
    return audioData.encodePCM();
  };

  // Discard all buffered audio.
  this.clear = function () {
    audioData.clear();
  };
};

/*
 * Open the ASR websocket, announce the start-of-speech frame, and start the
 * recorder once the connection is established. Each incoming message carries
 * the recognized text.
 */
const useWebSocket = () => {
  ws.value = new WebSocket("wss://192.168.1.70:10096"); //换为实际的websocket地址
  ws.value.binaryType = "arraybuffer"; // audio packets are sent as ArrayBuffers

  ws.value.onopen = function () {
    ws.value.send(JSON.stringify(startData));
    console.log("握手成功", ws.value.readyState);
    // Use the named constant and strict equality (was `readyState == 1`).
    if (ws.value.readyState === WebSocket.OPEN) {
      // Connection is live: start capturing and streaming audio.
      record.value.start();
    }
  };

  ws.value.onmessage = function (msg) {
    // Guard the parse so one malformed frame cannot kill the handler.
    try {
      const res = JSON.parse(msg.data);
      resText.value = res.text ?? "";
    } catch (err) {
      console.error("failed to parse ASR message:", err);
    }
  };

  ws.value.onerror = function (err) {
    console.info(err);
  };
};

/*
 * Start the intercom: grab the microphone, wrap the stream in a Recorder,
 * then open the websocket that streams the audio to the ASR server.
 */
const startIntercom = async () => {
  try {
    const constraints = { audio: true };
    const mediaStream = await navigator.mediaDevices.getUserMedia(constraints);
    init(new Recorder(mediaStream));
    console.log("开始对讲");
    useWebSocket();
  } catch (error) {
    console.error("无法打开麦克风", error);
  }
};

/*
 * Stop the intercom: tell the server the utterance is over and stop the
 * recorder. The websocket itself is intentionally kept open for reuse.
 */
const endIntercom = () => {
  if (!ws.value) return;
  ws.value.send(JSON.stringify(endData));
  console.log("不关闭websocket连接");
  record.value.stop();
};

/*
 * Draw a simple bar waveform of the latest audio chunk on the canvas.
 * Called from audio-processing callbacks, which can fire before the
 * component is mounted (or after unmount), so a missing canvas is skipped
 * instead of throwing on `canvas.getContext`.
 *
 * @param {Float32Array} inputBuffer - raw samples in [-1, 1]
 */
const updateWaveform = (inputBuffer) => {
  const canvas = waveformCanvas.value;
  if (!canvas || !inputBuffer || inputBuffer.length === 0) return;

  const ctx = canvas.getContext("2d");
  const { width, height } = canvas;

  // Wipe the previous frame.
  ctx.clearRect(0, 0, width, height);
  ctx.fillStyle = "#106AE8";

  const numBars = 20; // number of bars; tune to taste
  const barWidth = width / (numBars * 3); // width of each bar
  let x = 15;

  for (let i = 0; i < numBars; i++) {
    // One representative sample per bar, amplified for visibility.
    const sampleIndex = Math.floor(i * (inputBuffer.length / numBars));
    const barHeight = (Math.abs(inputBuffer[sampleIndex]) * height * 6) / 2;

    // Bars grow symmetrically out from the vertical center line.
    ctx.fillRect(x, height / 2 - barHeight, barWidth, barHeight * 2);
    x += barWidth + 4;
  }
};

onMounted(() => {
  // No explicit canvas setup is needed; all drawing happens lazily in
  // updateWaveform(). (Removed an unused local that only aliased
  // waveformCanvas.value and did nothing.)
});
</script>
<style lang="scss" scoped>
// Bottom-docked panel that hosts the intercom controls and waveform.
.mainContent {
  background: #dedede;
  width: 100%;
  height: 30vh;
  position: fixed;
  bottom: 0;
  // Header row: back icon + "转文字" label.
  .shiftText {
    display: flex;
    align-items: center;
    margin: 25px;

    .stxt {
      width: 108px;
      height: 50px;
      color: #0960ec;
      font-size: 36px;
      margin-left: 15px;
      padding-top: 5px;
    }
  }
  // Rounded pill buttons for start/stop intercom.
  .voiceBtn {
    width: 200px;
    height: 80px;
    border-radius: 36px;
    overflow: hidden;
    background-color: #ccc;
    font-size: 36px;
  }
}
</style>
