<template>
  <!-- Voice-intercom panel: start/stop buttons, live ASR result text,
       and a waveform preview of the audio being streamed. -->
  <div class="mainContent">
    <div class="shiftText">
      <el-icon size="60" color="#231815">
        <Back />
      </el-icon>
      <div class="stxt">转文字</div>
    </div>
    <button @click="startIntercom" class="voiceBtn">开始对讲</button>
    <button @click="endIntercom" class="voiceBtn">关闭对讲</button>
    <div style="margin-top: 30px; font-size: 30px">
      语音识别的文字为：{{ resText || "--" }}
    </div>
    <!-- Drawn by AudioRecorder.updateWaveform via the waveformCanvas ref -->
    <div class="canvas-container">
      <canvas ref="waveformCanvas" width="500" height="200"></canvas>
    </div>
  </div>
</template>

<script setup>
import { ref, onMounted, onBeforeUnmount } from "vue";

const resText = ref(""); // recognised text returned by the ASR server
const ws = ref(null); // WebSocket instance (null when disconnected)
const recorder = ref(null); // AudioRecorder instance
const audioStream = ref(null); // raw microphone MediaStream
const waveformCanvas = ref(null); // <canvas> element ref (see template)
const isRecording = ref(false); // UI-level recording state

// Handshake payload sent once the socket opens, before any audio.
// NOTE(review): "2pass" mode with is_speaking/wav_name fields looks like the
// FunASR WebSocket protocol — confirm against the server implementation.
const startData = {
  is_speaking: true,
  mode: "2pass",
  wav_name: "h5",
};

// Payload telling the server the utterance has ended.
const endData = {
  is_speaking: false,
  mode: "2pass",
  wav_name: "h5",
};

// Audio recorder: captures microphone input through the Web Audio API,
// decimates it to 16 kHz / 16-bit little-endian PCM, streams it over the
// module-level WebSocket (`ws`) and draws a live waveform on the canvas.
class AudioRecorder {
  /**
   * @param {MediaStream} stream - microphone stream from getUserMedia
   * @param {{sampleRate?: number, sampleBits?: number}} [options]
   *        target output rate / bit depth (defaults: 16000 Hz, 16 bit)
   */
  constructor(stream, options = {}) {
    this.stream = stream;
    this.context = new (window.AudioContext || window.webkitAudioContext)();
    this.audioInput = this.context.createMediaStreamSource(stream);
    this.sampleRate = options.sampleRate || 16000;
    this.sampleBits = options.sampleBits || 16;
    this.audioData = {
      buffer: [], // pending Float32Array chunks
      size: 0, // total samples buffered
      inputSampleRate: this.context.sampleRate,
      inputSampleBits: 16,
      outputSampleRate: this.sampleRate,
      outputSampleBits: this.sampleBits, // fixed typo: was "oututSampleBits"
      clear: () => {
        this.audioData.buffer = [];
        this.audioData.size = 0;
      },
      input: (data) => {
        this.audioData.buffer.push(new Float32Array(data));
        this.audioData.size += data.length;
      },
      // Naive decimation from the context's native rate down to
      // outputSampleRate: keeps every Nth sample, no low-pass filtering.
      compress: () => {
        const data = new Float32Array(this.audioData.size);
        let offset = 0;
        for (let i = 0; i < this.audioData.buffer.length; i++) {
          data.set(this.audioData.buffer[i], offset);
          offset += this.audioData.buffer[i].length;
        }
        const compression = Math.floor(
          this.audioData.inputSampleRate / this.audioData.outputSampleRate
        );
        const length = Math.floor(data.length / compression);
        const result = new Float32Array(length);
        for (let i = 0; i < length; i++) {
          result[i] = data[i * compression];
        }
        return result;
      },
      // Encode buffered samples as signed 16-bit little-endian PCM.
      encodePCM: () => {
        const bytes = this.audioData.compress();
        const dataLength = bytes.length * (this.audioData.outputSampleBits / 8);
        const buffer = new ArrayBuffer(dataLength);
        const data = new DataView(buffer);
        for (let i = 0; i < bytes.length; i++) {
          // Clamp to [-1, 1] before scaling to the int16 range.
          const s = Math.max(-1, Math.min(1, bytes[i]));
          data.setInt16(i * 2, s < 0 ? s * 0x8000 : s * 0x7fff, true);
        }
        return new Blob([data]);
      },
    };

    this.processor = null;
    this.isRecording = false;
    this.initProcessor();
  }

  // Prefer AudioWorklet; fall back to the deprecated ScriptProcessor.
  initProcessor() {
    if (this.context.audioWorklet) {
      this.context.audioWorklet
        .addModule("/js/audio-processor.js")
        .then(() => {
          this.processor = new AudioWorkletNode(
            this.context,
            "audio-processor"
          );
          this.audioInput.connect(this.processor);
          this.processor.connect(this.context.destination);
          this.setupWorkletMessaging();
        })
        .catch((err) => {
          console.error("AudioWorklet 加载失败:", err);
          this.fallbackToScriptProcessor();
        });
    } else {
      this.fallbackToScriptProcessor();
    }
  }

  fallbackToScriptProcessor() {
    console.warn("使用已弃用的 ScriptProcessor");
    this.processor = this.context.createScriptProcessor(4096, 1, 1);
    this.audioInput.connect(this.processor);
    this.processor.connect(this.context.destination);
    this.setupScriptProcessor();
  }

  setupWorkletMessaging() {
    // assumes the worklet posts raw sample chunks — TODO confirm
    // against /js/audio-processor.js
    this.processor.port.onmessage = (e) => {
      this.processAudioData(e.data);
    };
  }

  setupScriptProcessor() {
    this.processor.onaudioprocess = (e) => {
      this.processAudioData(e.inputBuffer.getChannelData(0));
    };
  }

  // Common per-chunk path for both processor flavours.
  processAudioData(inputBuffer) {
    // Bug fix: chunks were previously buffered unconditionally, so audio
    // accumulated without bound whenever the recorder was idle.
    if (!this.isRecording) return;
    this.audioData.input(inputBuffer);
    if (ws.value && ws.value.readyState === WebSocket.OPEN) {
      this.sendAudioData();
      this.updateWaveform(inputBuffer);
    }
  }

  // Encode everything buffered so far and send it in 1024-byte frames.
  sendAudioData() {
    const blob = this.audioData.encodePCM();

    if (blob.size > 0) {
      const reader = new FileReader();
      reader.onload = (e) => {
        // Bug fix: onload fires asynchronously — the socket may have been
        // closed/nulled in the meantime, so re-check before sending.
        if (!ws.value || ws.value.readyState !== WebSocket.OPEN) return;
        const arr = new Int8Array(e.target.result);
        for (let i = 0; i < arr.length; i += 1024) {
          ws.value.send(arr.slice(i, i + 1024));
        }
      };
      reader.readAsArrayBuffer(blob);
      // Safe to clear immediately: the Blob snapshot above owns the bytes.
      this.audioData.clear();
    }
  }

  // Draw the latest chunk as a simple polyline across the canvas.
  updateWaveform(inputBuffer) {
    const canvas = waveformCanvas.value;
    if (!canvas) return;

    const ctx = canvas.getContext("2d");
    const width = canvas.width;
    const height = canvas.height;

    ctx.clearRect(0, 0, width, height);

    const step = Math.ceil(inputBuffer.length / width);
    const amp = height / 2;

    ctx.beginPath();
    for (let i = 0; i < width; i++) {
      const y =
        amp + amp * Math.min(1, Math.max(-1, inputBuffer[i * step] || 0));
      if (i === 0) {
        ctx.moveTo(i, y);
      } else {
        ctx.lineTo(i, y);
      }
    }
    ctx.strokeStyle = "#106AE8";
    ctx.stroke();
  }

  // Begin streaming. Chunk delivery is handled by the handlers installed in
  // initProcessor(), which gate on `isRecording` and the WebSocket state.
  start() {
    console.log("开始录音");
    // Bug fix: the old implementation replaced port.onmessage here with a
    // handler that ignored both `isRecording` and the socket state, so audio
    // kept being sent after stop() and send() could hit a closed socket.
    this.isRecording = true;
  }

  stop() {
    this.isRecording = false;
    if (this.processor instanceof AudioWorkletNode) {
      this.processor.port.postMessage({ action: "stop" });
    }
  }

  // Release all audio resources: processor, graph, context and mic tracks.
  close() {
    this.stop();
    this.processor?.disconnect();
    this.audioInput.disconnect();
    this.context.close();
    if (this.stream) {
      this.stream.getTracks().forEach((track) => track.stop());
    }
  }
}

/*
 * WebSocket connection management.
 * Opens the recognition socket, sends the start handshake, wires up
 * result handling, and starts the recorder once connected.
 * @param {string} [url] - recognition server endpoint (generalized from
 *                         the previously hard-coded address)
 */
const initWebSocket = (url = "wss://192.168.1.70:10095") => {
  if (ws.value) return; // already connected or connecting

  ws.value = new WebSocket(url);
  ws.value.binaryType = "arraybuffer";

  ws.value.onopen = () => {
    console.log("WebSocket 连接已建立");
    // Handshake must precede any audio frames.
    ws.value.send(JSON.stringify(startData));
    recorder.value?.start();
  };

  ws.value.onmessage = (msg) => {
    try {
      const res = JSON.parse(msg.data);
      console.log("WebSocket 消息:", res);
      resText.value = res.text;
    } catch (e) {
      console.error("解析 WebSocket 消息失败:", e);
    }
  };

  ws.value.onerror = (err) => {
    console.error("WebSocket 错误:", err);
  };

  ws.value.onclose = () => {
    console.log("WebSocket 连接已关闭");
    recorder.value?.close();
    isRecording.value = false;
    // Bug fix: ws.value was left set after a server-side close, so the
    // guard at the top blocked any later reconnection attempt forever.
    ws.value = null;
  };
};

/*
 * Start the intercom: acquire the microphone, build a recorder around the
 * stream, then open the recognition WebSocket. No-op while already recording.
 */
const startIntercom = async () => {
  if (isRecording.value) {
    return;
  }

  try {
    const micStream = await navigator.mediaDevices.getUserMedia({
      audio: true,
    });
    audioStream.value = micStream;
    recorder.value = new AudioRecorder(micStream);
    initWebSocket();
    isRecording.value = true;
  } catch (error) {
    console.error("无法打开麦克风:", error);
    alert("无法访问麦克风，请检查权限设置");
  }
};

/*
 * Stop the intercom: notify the server the utterance ended, then release
 * the recorder, the microphone tracks and the WebSocket.
 */
const endIntercom = () => {
  isRecording.value = false;
  console.log("关闭对讲");
  if (!ws.value) return;

  // Bug fix: send() throws InvalidStateError on a CONNECTING socket, e.g.
  // when the stop button is clicked before the connection finishes opening.
  if (ws.value.readyState === WebSocket.OPEN) {
    ws.value.send(JSON.stringify(endData));
  }

  recorder.value?.stop();
  recorder.value?.close();
  recorder.value = null;

  if (audioStream.value) {
    audioStream.value.getTracks().forEach((track) => track.stop());
    audioStream.value = null;
  }

  ws.value.close();
  ws.value = null;
};

// Release microphone / socket resources when the component unmounts.
onBeforeUnmount(() => {
  endIntercom();
});
</script>

<style lang="scss" scoped>
/* Bottom-docked intercom panel filling the lower 30% of the viewport. */
.mainContent {
  background: #dedede;
  width: 100%;
  height: 30vh;
  position: fixed;
  bottom: 0;
  display: flex;
  flex-direction: column;
  align-items: center;

  /* Header row: back icon + "转文字" label. */
  .shiftText {
    display: flex;
    align-items: center;
    margin: 25px;

    .stxt {
      width: 108px;
      height: 50px;
      color: #0960ec;
      font-size: 36px;
      margin-left: 15px;
      padding-top: 5px;
    }
  }

  /* Start / stop intercom buttons. */
  .voiceBtn {
    width: 200px;
    height: 80px;
    border-radius: 36px;
    overflow: hidden;
    background-color: #ccc;
    font-size: 36px;
    margin: 0 10px;

    &:active {
      background-color: #aaa;
    }
  }

  /* Card wrapping the waveform canvas. */
  .canvas-container {
    margin-top: 20px;
    background-color: white;
    border-radius: 8px;
    padding: 10px;
    box-shadow: 0 2px 10px rgba(0, 0, 0, 0.1);
  }
}
</style>
