import { useState, useRef, useEffect } from 'react';

const AudioWebSocket = () => {
  // Latest speech-recognition result shown in the UI.
  const [resText, setResText] = useState('');
  const ws = useRef(null); // WebSocket connection handle
  const record = useRef(null); // active Recorder instance (null when idle)
  const waveformCanvas = useRef(null); // <canvas> used for the level meter
  const audioContextRef = useRef(null); // shared AudioContext (one per mount)

  // Control frames marking the start/end of a talk session.
  const startData = { is_speaking: true, mode: "2pass", wav_name: "h5" };
  const endData = { is_speaking: false, mode: "2pass", wav_name: "h5" };

  // Recorder: captures mic audio via ScriptProcessorNode, downsamples it to
  // 16 kHz / 16-bit little-endian PCM, streams it over the WebSocket, and
  // drives the waveform meter. One instance lives at a time (record.current).
  class Recorder {
    constructor(stream) {
      this.sampleBits = 16; // output bit depth
      this.sampleRate = 16000; // output sample rate (Hz)
      this.stream = stream; // kept so stop() can release the mic tracks

      // Create/reuse the shared AudioContext BEFORE building audioData so the
      // REAL capture rate is known — hard-coding 48000 broke resampling on
      // devices that capture at 44.1 kHz.
      audioContextRef.current = audioContextRef.current || new AudioContext();
      this.audioInput = audioContextRef.current.createMediaStreamSource(stream);
      // NOTE: createScriptProcessor is deprecated; migrate to AudioWorklet
      // when browser-support requirements allow.
      this.recorderNode = audioContextRef.current.createScriptProcessor(4096, 1, 1);

      // Accumulates raw Float32 samples and converts them to 16-bit PCM.
      this.audioData = {
        type: "wav",
        size: 0, // total samples buffered
        buffer: [], // Float32Array chunks
        inputSampleRate: audioContextRef.current.sampleRate, // actual capture rate
        inputSampleBits: 16,
        outputSampleRate: this.sampleRate,
        outputSampleBits: this.sampleBits,

        // Drop all buffered samples.
        clear: function () {
          this.buffer = [];
          this.size = 0;
        },

        // Append one chunk of raw samples (copied — the audio engine reuses
        // the underlying buffer between onaudioprocess callbacks).
        input: function (data) {
          this.buffer.push(new Float32Array(data));
          this.size += data.length;
        },

        // Merge all chunks and downsample to the output rate by decimation.
        compress: function () {
          const mergedData = new Float32Array(this.size);
          let offset = 0;
          this.buffer.forEach(chunk => {
            mergedData.set(chunk, offset);
            offset += chunk.length;
          });

          const compressionRatio = this.inputSampleRate / this.outputSampleRate;
          const compressedLength = Math.floor(mergedData.length / compressionRatio);
          const compressedData = new Float32Array(compressedLength);

          for (let i = 0; i < compressedLength; i++) {
            // Math.floor handles non-integer ratios (e.g. 44100 / 16000);
            // the original fractional index read undefined array slots.
            compressedData[i] = mergedData[Math.floor(i * compressionRatio)];
          }
          return compressedData;
        },

        // Encode buffered samples as 16-bit little-endian PCM.
        encodePCM: function () {
          const pcmData = this.compress();
          const byteLength = pcmData.length * (this.outputSampleBits / 8);
          const buffer = new ArrayBuffer(byteLength);
          const dataView = new DataView(buffer);

          for (let i = 0; i < pcmData.length; i++) {
            const value = pcmData[i];
            const intValue = value < 0 ?
              Math.max(-1, value) * 0x8000 :  // negative range: -32768 ~ 0
              Math.min(1, value) * 0x7FFF;    // positive range: 0 ~ 32767
            dataView.setInt16(i * 2, intValue, true); // little-endian
          }
          return new Blob([dataView], { type: 'audio/wav' });
        }
      };

      this.recording = true; // cleared by stop() to halt streaming

      // Arrow function keeps `this` bound to the Recorder instance.
      this.recorderNode.onaudioprocess = (e) => {
        const inputBuffer = e.inputBuffer.getChannelData(0);
        this.audioData.input(inputBuffer);
        this.sendData();
        this.updateWaveform(inputBuffer);
      };
    }

    // Connect the audio graph so onaudioprocess starts firing.
    start() {
      this.audioInput.connect(this.recorderNode);
      this.recorderNode.connect(audioContextRef.current.destination);
    }

    // Stop streaming, detach the audio graph, and release the microphone.
    stop() {
      this.recording = false;
      this.audioInput.disconnect(); // original leaked the source connection
      this.recorderNode.disconnect();
      // Stop the tracks so the browser's mic indicator turns off.
      this.stream.getTracks().forEach(track => track.stop());
    }

    // Encode buffered audio and push it over the WebSocket in 1 KiB chunks.
    sendData() {
      if (!this.recording) return;
      // send() throws while the socket is CONNECTING/CLOSING/CLOSED — bail out.
      if (!ws.current || ws.current.readyState !== WebSocket.OPEN) return;

      const reader = new FileReader();
      reader.onload = (e) => {
        const rawData = e.target.result;
        const byteArray = new Int8Array(rawData);

        for (let i = 0; i < byteArray.length; i += 1024) {
          const chunk = byteArray.slice(i, i + 1024);
          ws.current.send(chunk);
        }
      };

      reader.readAsArrayBuffer(this.audioData.encodePCM());
      this.audioData.clear(); // safe: the Blob already snapshotted the samples
    }

    // Draw a simple bar-style level meter from the latest input buffer.
    updateWaveform(inputBuffer) {
      const canvas = waveformCanvas.current;
      if (!canvas) return;

      const ctx = canvas.getContext('2d');
      const width = canvas.width;
      const height = canvas.height;

      ctx.clearRect(0, 0, width, height);
      ctx.fillStyle = '#106AE8';

      const numBars = 20;
      const barWidth = width / (numBars * 3);
      let x = 15;

      for (let i = 0; i < numBars; i++) {
        const sampleIndex = Math.floor(i * (inputBuffer.length / numBars));
        const barHeight = (Math.abs(inputBuffer[sampleIndex]) * height * 6) / 2;
        ctx.fillRect(x, height / 2 - barHeight, barWidth, barHeight * 2);
        x += barWidth + 4;
      }
    }
  }

  // Open the recognition WebSocket; streaming starts once it connects.
  const initWebSocket = () => {
    ws.current = new WebSocket('ws://localhost:8081/ws/a'); // TODO: replace with real endpoint
    ws.current.binaryType = 'arraybuffer';

    ws.current.onopen = () => {
      // Announce the start of an utterance before any audio frames.
      ws.current.send(JSON.stringify(startData));
      console.log('WebSocket 连接成功');
      if (record.current) {
        record.current.start(); // begin piping audio through the processor
      }
    };

    ws.current.onmessage = (msg) => {
      // binaryType is 'arraybuffer', so binary frames are possible; also guard
      // against malformed JSON so one bad frame doesn't crash the handler.
      if (typeof msg.data !== 'string') return;
      try {
        const res = JSON.parse(msg.data);
        setResText(res.text ?? ''); // update the recognized text
      } catch (err) {
        console.error('Failed to parse WebSocket message:', err);
      }
    };

    ws.current.onerror = (err) => {
      console.error('WebSocket 错误:', err);
    };
  };

  // Begin a talk session: acquire the microphone, then open the socket.
  const startIntercom = async () => {
    // Ignore repeated clicks while a session is already active — the original
    // created a second Recorder + WebSocket and leaked the first pair.
    if (record.current?.recording) return;
    try {
      // Request microphone permission / capture stream.
      const mediaStream = await navigator.mediaDevices.getUserMedia({ audio: true });
      record.current = new Recorder(mediaStream); // create the Recorder instance
      initWebSocket(); // connect; Recorder.start() runs in onopen
    } catch (error) {
      console.error('麦克风权限申请失败:', error);
    }
  };

  // End the talk session: stop capturing first, then notify the server.
  const endIntercom = () => {
    // Stop regardless of socket state so the mic is always released — the
    // original skipped stop() when send() threw on a non-open socket.
    record.current?.stop();
    if (ws.current && ws.current.readyState === WebSocket.OPEN) {
      ws.current.send(JSON.stringify(endData)); // signal end of utterance
    }
  };

  // Release all resources on unmount: recorder, socket, audio context.
  useEffect(() => {
    return () => {
      record.current?.stop(); // detach audio nodes / halt streaming
      if (ws.current) {
        ws.current.close(); // close the WebSocket
      }
      // close() rejects if the context is already closed — guard on state.
      if (audioContextRef.current && audioContextRef.current.state !== 'closed') {
        audioContextRef.current.close();
      }
    };
  }, []);

  // UI: start/stop buttons, the recognized text, and the waveform canvas.
  return (
    <div className="mainContent">
      <button onClick={startIntercom}>开始对讲</button>
      <button onClick={endIntercom}>关闭对讲</button>
      <div>语音识别的文字为：{resText || '--'}</div>
      <canvas ref={waveformCanvas} width="200" height="20" />
    </div>
  );
};

export default AudioWebSocket;
