import { useState, useRef, useEffect } from 'react';

// Live speech-to-text widget: streams microphone audio to a backend proxy over
// a WebSocket and renders the incremental transcript plus a waveform display.
const SpeechTranscriber = () => {
  const [transcript, setTranscript] = useState('');               // accumulated transcription text
  const [connectionMessage, setConnectionMessage] = useState(''); // user-facing status line
  const ws = useRef(null);               // active WebSocket (null when disconnected)
  const record = useRef(null);           // current Recorder instance (null when idle)
  const waveformCanvas = useRef(null);   // <canvas> element for the live waveform
  const audioContextRef = useRef(null);  // shared AudioContext, created on start
  const audioChunk = useRef(new Uint8Array()); // most recently encoded PCM frame
  const isSending = useRef(false);       // throttle flag consumed by Recorder.sendChunk

  // Backend proxy endpoint that relays the audio stream to the recognizer.
  const WS_ENDPOINT = 'ws://localhost:8070/middleware/api/transcribe';
  /**
   * Captures microphone audio, converts it to 16 kHz / 16-bit / mono PCM,
   * streams it through the component-level WebSocket ref, and renders a live
   * waveform onto the canvas ref.
   *
   * NOTE(review): ScriptProcessorNode is deprecated in favor of
   * AudioWorkletNode; migrating is a larger refactor left as a follow-up.
   */
  class Recorder {
    constructor(stream) {
      this.sampleRate = 16000;  // PCM rate the transcription backend expects
      this.sampleBits = 16;     // bits per encoded sample
      this.channel = 1;         // mono
      this.frameDuration = 40;  // ms of audio per WebSocket frame
      this.targetSamples = this.sampleRate * (this.frameDuration / 1000); // 640 samples per frame

      this.audioContext = audioContextRef.current || new (window.AudioContext || window.webkitAudioContext)();
      // BUG FIX: the AudioContext runs at the hardware rate (typically 44.1 or
      // 48 kHz), NOT 16 kHz. The original sent hardware-rate samples labeled
      // as 16 kHz; we record the real input rate and resample before encoding.
      this.inputSampleRate = this.audioContext.sampleRate;
      this.source = this.audioContext.createMediaStreamSource(stream);
      this.processor = this.audioContext.createScriptProcessor(1024, 1, 1);

      this.audioBuffer = new Float32Array(0); // accumulated, already-resampled samples
      this.processor.onaudioprocess = (e) => this.processAudio(e);
      this.source.connect(this.processor);
      // A ScriptProcessorNode must be connected to a destination or its
      // onaudioprocess callback never fires in some browsers.
      this.processor.connect(this.audioContext.destination);
    }

    // Per-callback pipeline: draw waveform, resample, accumulate, emit frames.
    processAudio(e) {
      const input = e.inputBuffer.getChannelData(0);
      if (input.length === 0) return; // skip empty callbacks

      this.updateWaveform(input); // waveform uses the raw hardware-rate block

      const resampled = this.downsample(input);
      this.audioBuffer = this.concatFloat32Arrays(this.audioBuffer, resampled);
      // BUG FIX: drain ALL complete frames. The original used `if`, emitting at
      // most one frame per callback and letting the remainder back up forever.
      while (this.audioBuffer.length >= this.targetSamples) {
        const frame = this.audioBuffer.subarray(0, this.targetSamples);
        this.audioBuffer = this.audioBuffer.subarray(this.targetSamples);
        this.encodePCM(frame);
      }
    }

    // Linear-interpolation downsampler from the hardware rate to 16 kHz.
    // Returns the input untouched when no resampling is needed.
    downsample(input) {
      if (this.inputSampleRate === this.sampleRate) return input;
      const ratio = this.inputSampleRate / this.sampleRate;
      const outLength = Math.floor(input.length / ratio);
      const out = new Float32Array(outLength);
      for (let i = 0; i < outLength; i++) {
        const pos = i * ratio;
        const lo = Math.floor(pos);
        const hi = Math.min(lo + 1, input.length - 1);
        const frac = pos - lo;
        out[i] = input[lo] * (1 - frac) + input[hi] * frac;
      }
      return out;
    }

    // Returns a new Float32Array holding a followed by b.
    concatFloat32Arrays(a, b) {
      const result = new Float32Array(a.length + b.length);
      result.set(a);
      result.set(b, a.length);
      return result;
    }

    // Converts [-1, 1] floats to little-endian signed 16-bit PCM and sends it.
    encodePCM(input) {
      const pcmBuffer = new ArrayBuffer(input.length * 2);
      const view = new DataView(pcmBuffer);

      for (let i = 0; i < input.length; i++) {
        const sample = Math.max(-1, Math.min(input[i], 1)); // clamp to avoid integer wrap-around
        view.setInt16(i * 2, sample * 0x7FFF, true);        // true = little-endian
      }

      audioChunk.current = new Uint8Array(pcmBuffer);
      this.sendChunk();
    }

    // BUG FIX: the original throttled sends with a setTimeout flag and silently
    // DROPPED every frame produced while the flag was set, corrupting the audio
    // stream. WebSocket.send() already buffers outgoing data, so every frame is
    // sent; frames are skipped only while the socket is not yet OPEN (sending
    // on a CONNECTING socket throws an InvalidStateError).
    sendChunk() {
      if (ws.current && ws.current.readyState === WebSocket.OPEN) {
        ws.current.send(audioChunk.current);
      }
    }

    // Draws a bar-style waveform of the current input block onto the canvas.
    updateWaveform(input) {
      const canvas = waveformCanvas.current;
      if (!canvas) return;

      const ctx = canvas.getContext('2d');
      const width = canvas.width;
      const height = canvas.height;
      const centerY = height / 2;

      // Clear, then repaint the background gradient.
      ctx.clearRect(0, 0, width, height);
      const bgGradient = ctx.createLinearGradient(0, 0, 0, height);
      bgGradient.addColorStop(0, '#f8f9fa');
      bgGradient.addColorStop(1, '#e9ecef');
      ctx.fillStyle = bgGradient;
      ctx.fillRect(0, 0, width, height);

      // Map samples onto a limited number of bars that fit the canvas width.
      const maxAmplitude = 0.8; // clamp so bars never overflow the canvas
      const barSpacing = 1;
      const barWidth = (width / input.length) * 2;
      const visibleBars = Math.min(input.length, width / (barWidth + barSpacing));

      ctx.fillStyle = '#2563eb';
      ctx.strokeStyle = '#1d4ed8';
      ctx.lineWidth = 0.5;

      for (let i = 0; i < visibleBars; i++) {
        // Stride through the buffer so the bars cover the whole block.
        const sample = input[i * Math.floor(input.length / visibleBars)];
        const amplitude = Math.min(Math.abs(sample), maxAmplitude);
        const barHeight = amplitude * centerY * 1.5; // 1.5x gain for visibility

        const x = i * (barWidth + barSpacing);
        const y = centerY - barHeight / 2;

        // Rounded bars; ctx.roundRect needs a recent browser — TODO confirm support targets.
        ctx.beginPath();
        ctx.roundRect(x, y, barWidth, barHeight, 2);
        ctx.fill();
        ctx.stroke();
      }

      // Outer border.
      ctx.strokeStyle = '#d1d5db';
      ctx.lineWidth = 1;
      ctx.strokeRect(0, 0, width, height);
    }
  }

  // Opens the WebSocket to the transcription proxy and wires up its handlers.
  const initWebSocket = () => {
    // Capture the socket in a local so late-firing handlers (especially
    // onclose) never clobber a NEWER socket stored in ws.current.
    const socket = new WebSocket(WS_ENDPOINT);
    socket.binaryType = 'arraybuffer';
    ws.current = socket;

    socket.onopen = () => {
      console.log('已连接到后端代理');
      setConnectionMessage('连接成功，开始录音即可转写');
    };

    // Messages are JSON result frames; anything unparseable is treated as a
    // plain-text status notice from the proxy.
    socket.onmessage = (event) => {
      try {
        const { errno, data } = JSON.parse(event.data);
        // BUG FIX: guard the payload so a frame without `onebest` can never
        // append the literal string "undefined" to the transcript.
        const text = typeof data?.onebest === 'string' ? data.onebest : '';
        if (errno === 8 || errno === 0) {
          // errno 0/8: appears to be a continuing result — append in place.
          setTranscript(prev => prev + text);
        } else if (errno === 9) {
          // errno 9: appears to be a sentence-final result — start a new line.
          setTranscript(prev => prev + '\n' + text);
        }
        setConnectionMessage('');
      } catch (error) {
        const message = event.data;
        console.log('收到非JSON消息:', message);
        setConnectionMessage(message);
      }
    };

    socket.onerror = (err) => {
      console.error('WebSocket错误:', err);
      setConnectionMessage('连接异常，请检查服务端');
    };

    // BUG FIX: the original never handled close, leaving a dead socket in
    // ws.current that later send() calls would throw on.
    socket.onclose = () => {
      if (ws.current === socket) ws.current = null;
    };
  };

  // Requests microphone access, opens the WebSocket, and starts streaming.
  const startRecording = async () => {
    // BUG FIX: guard against double-start; a second click leaked the previous
    // recorder and socket.
    if (record.current) return;
    try {
      // BUG FIX: open the socket BEFORE audio starts flowing. The original
      // connected only after the recorder was already producing frames, so the
      // first frames raced a CONNECTING socket.
      initWebSocket();
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      audioContextRef.current = new (window.AudioContext || window.webkitAudioContext)();
      record.current = new Recorder(stream);
    } catch (error) {
      console.error('麦克风权限失败:', error);
      setConnectionMessage('请允许麦克风权限后重试');
      // Don't leave a half-open socket behind if the mic request failed.
      if (ws.current) {
        ws.current.close();
        ws.current = null;
      }
    }
  };

  // Tears everything down: signals end-of-stream, closes the socket, detaches
  // the audio graph, releases the microphone, and closes the AudioContext.
  const stopRecording = () => {
    if (ws.current) {
      // BUG FIX: only send the end-of-stream marker on an OPEN socket;
      // send() on a CONNECTING socket throws an InvalidStateError.
      if (ws.current.readyState === WebSocket.OPEN) {
        ws.current.send(new Uint8Array()); // empty frame = end-of-stream marker
      }
      ws.current.close();
      ws.current = null;
    }
    if (record.current) {
      if (record.current.processor) {
        record.current.processor.disconnect();
        record.current.source.disconnect();
      }
      // BUG FIX: stop the MediaStream tracks; otherwise the browser keeps the
      // microphone open (and its "recording" indicator lit) after stop.
      record.current.source?.mediaStream?.getTracks().forEach((track) => track.stop());
      record.current = null;
    }
    if (audioContextRef.current) {
      audioContextRef.current.close();
      audioContextRef.current = null;
    }
  };

  // Release the microphone, socket, and audio context on unmount by handing
  // stopRecording to React as the effect's cleanup function.
  useEffect(() => stopRecording, []);

  // Layout: control buttons, status banner, transcript panel, waveform canvas.
  return (
    <div className="p-4 max-w-2xl mx-auto">
      <div className="mb-4">
        <button 
          onClick={startRecording} 
          className="px-4 py-2 bg-blue-600 text-white rounded mr-2 hover:bg-blue-700 transition-colors"
        >
          开始录音
        </button>
        <button 
          onClick={stopRecording} 
          className="px-4 py-2 bg-red-600 text-white rounded hover:bg-red-700 transition-colors"
        >
          停止录音
        </button>
      </div>

      {/* Status banner — only rendered while there is a message to show. */}
      {connectionMessage && (
        <div className="mb-4 p-2 bg-yellow-50 text-yellow-700 rounded text-sm">
          状态提示：{connectionMessage}
        </div>
      )}

      {/* Incremental transcript; placeholder text until results arrive. */}
      <div className="mb-4">
        <h3 className="text-sm font-medium text-gray-700 mb-2">转写结果：</h3>
        <div className="border p-3 rounded min-h-[150px] bg-white shadow-sm">
          {transcript || '（等待录音中...）'}
        </div>
      </div>

      <div>
        <h3 className="text-sm font-medium text-gray-700 mb-2">实时音浪：</h3>
        <canvas 
          ref={waveformCanvas} 
          width="400" 
          height="80" 
          className="border rounded-lg bg-white shadow-sm"
          // CSS preserves the aspect ratio so parent-container scaling does not distort the drawing
          style={{ width: '100%', height: 'auto', maxWidth: '400px' }}
        />
      </div>
    </div>
  );
};

export default SpeechTranscriber;
