<!DOCTYPE html>
<html lang="en">
<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1.0">
  <title>原始音频数据处理示例</title>
  <style>
    /* Base page layout and typography */
    body {
      font-family: Arial, sans-serif;
      max-width: 800px;
      margin: 0 auto;
      padding: 20px;
      line-height: 1.6;
    }
    
    h1 {
      color: #333;
      border-bottom: 1px solid #eee;
      padding-bottom: 10px;
    }
    
    .container {
      margin-top: 20px;
    }
    
    /* Card-like panel for each demo section */
    .section {
      margin-bottom: 30px;
      padding: 15px;
      border: 1px solid #ddd;
      border-radius: 5px;
      background-color: #f9f9f9;
    }
    
    .section h2 {
      margin-top: 0;
      font-size: 1.2em;
      color: #444;
    }
    
    /* Action buttons with hover and disabled states */
    button {
      padding: 8px 16px;
      background-color: #4CAF50;
      color: white;
      border: none;
      border-radius: 4px;
      cursor: pointer;
      margin-right: 10px;
      margin-bottom: 10px;
    }
    
    button:hover {
      background-color: #45a049;
    }
    
    button:disabled {
      background-color: #cccccc;
      cursor: not-allowed;
    }
    
    /* Live input-level meter; the bar's width is driven by JS */
    .audio-level {
      height: 20px;
      background-color: #eee;
      border-radius: 3px;
      margin-top: 10px;
      overflow: hidden;
    }
    
    .audio-level-bar {
      height: 100%;
      width: 0%;
      background-color: #4CAF50;
      transition: width 0.1s ease-out;
    }
    
    .status {
      margin-top: 10px;
      padding: 10px;
      background-color: #f5f5f5;
      border-radius: 3px;
    }
    
    /* Scrolling monospace log of audio-data events */
    .log-container {
      height: 200px;
      overflow-y: auto;
      background-color: #f5f5f5;
      padding: 10px;
      border-radius: 3px;
      font-family: monospace;
      font-size: 0.9em;
      margin-top: 10px;
    }
    
    /* Row of numeric stats readouts */
    .audio-data-stats {
      display: flex;
      gap: 20px;
      margin-top: 10px;
      margin-bottom: 10px;
    }
    
    .audio-data-stats div {
      flex: 1;
      padding: 10px;
      background-color: #f0f0f0;
      border-radius: 3px;
      text-align: center;
    }
    
    .audio-data-stats span {
      font-weight: bold;
      font-size: 1.2em;
    }
    
    /* Host container for the waveform canvas */
    .waveform-container {
      height: 100px;
      background-color: #f0f0f0;
      border-radius: 3px;
      margin-top: 10px;
      position: relative;
      overflow: hidden;
    }
    
    .waveform {
      position: absolute;
      top: 0;
      left: 0;
      width: 100%;
      height: 100%;
    }
    
    .controls {
      display: flex;
      flex-wrap: wrap;
      gap: 10px;
      margin-bottom: 15px;
    }
    
    .control-group {
      display: flex;
      flex-direction: column;
      margin-right: 20px;
    }
    
    .control-group label {
      margin-bottom: 5px;
      font-weight: bold;
    }
    
    select, input {
      padding: 5px;
      border-radius: 3px;
      border: 1px solid #ddd;
    }
    
    /* Static code sample shown in the second section */
    .code-example {
      background-color: #f5f5f5;
      padding: 15px;
      border-radius: 5px;
      overflow-x: auto;
      font-family: monospace;
      margin-top: 10px;
    }
  </style>
</head>
<body>
  <h1>原始音频数据处理示例</h1>
  
  <div class="container">
    <div class="section">
      <h2>音频数据处理与可视化</h2>
      <p>此示例展示了如何访问、处理和可视化原始音频数据，为与第三方语音识别服务集成做准备。</p>
      
      <div class="controls">
        <div class="control-group">
          <button id="start-btn">开始录音</button>
          <button id="stop-btn" disabled>停止录音</button>
        </div>
        
        <div class="control-group">
          <label for="buffer-size">缓冲区大小:</label>
          <select id="buffer-size">
            <option value="256">256 样本</option>
            <option value="512">512 样本</option>
            <option value="1024">1024 样本</option>
            <option value="2048">2048 样本</option>
            <option value="4096" selected>4096 样本</option>
            <option value="8192">8192 样本</option>
            <option value="16384">16384 样本</option>
          </select>
        </div>
        
        <div class="control-group">
          <label for="sample-rate">目标采样率:</label>
          <select id="sample-rate">
            <option value="8000">8 kHz</option>
            <option value="16000" selected>16 kHz</option>
            <option value="22050">22.05 kHz</option>
            <option value="44100">44.1 kHz</option>
            <option value="48000">48 kHz</option>
          </select>
        </div>
        
        <div class="control-group">
          <label for="processing-enabled">启用处理:</label>
          <input type="checkbox" id="processing-enabled" checked>
        </div>
      </div>
      
      <div class="audio-level">
        <div id="level-bar" class="audio-level-bar"></div>
      </div>
      
      <div class="status">
        <div id="mic-status">麦克风状态: 未启动</div>
      </div>
      
      <div class="waveform-container">
        <canvas id="waveform" class="waveform"></canvas>
      </div>
      
      <div class="audio-data-stats">
        <div>
          原始采样率: <span id="original-sample-rate">0</span> Hz
        </div>
        <div>
          目标采样率: <span id="target-sample-rate">16000</span> Hz
        </div>
        <div>
          缓冲区大小: <span id="current-buffer-size">0</span> 样本
        </div>
        <div>
          回调次数: <span id="callback-count">0</span>
        </div>
      </div>
      
      <div class="log-container" id="audio-data-log"></div>
    </div>
    
    <div class="section">
      <h2>音频数据转换</h2>
      <p>以下是将原始音频数据转换为适合语音识别服务的格式的示例代码：</p>
      
      <div class="code-example">
        <pre>
// 将Float32Array转换为Int16Array (16位PCM)
function convertToInt16(float32Array) {
  const int16Array = new Int16Array(float32Array.length);
  
  for (let i = 0; i &lt; float32Array.length; i++) {
    // 将-1.0到1.0的浮点数转换为-32768到32767的整数
    const s = Math.max(-1, Math.min(1, float32Array[i]));
    int16Array[i] = s &lt; 0 ? s * 0x8000 : s * 0x7FFF;
  }
  
  return int16Array;
}

// 重采样音频数据（简化版，仅用于演示）
// 注意：这是一个简单的线性插值实现，音质有限
// 在实际应用中，强烈建议使用专业的重采样库，如：
// - resampler.js: https://github.com/taisel/XAudioJS/blob/master/resampler.js
// - audio-resampler: https://www.npmjs.com/package/audio-resampler
// - web-audio-api-resampler: https://github.com/echo66/node-web-audio-api-resampler
function resampleAudio(audioData, originalSampleRate, targetSampleRate) {
  if (originalSampleRate === targetSampleRate) {
    return audioData;
  }
  
  const ratio = originalSampleRate / targetSampleRate;
  const newLength = Math.round(audioData.length / ratio);
  const result = new Float32Array(newLength);
  
  for (let i = 0; i &lt; newLength; i++) {
    const pos = i * ratio;
    const index = Math.floor(pos);
    const fraction = pos - index;
    
    if (index + 1 &lt; audioData.length) {
      // 线性插值
      result[i] = audioData[index] * (1 - fraction) + audioData[index + 1] * fraction;
    } else {
      result[i] = audioData[index];
    }
  }
  
  return result;
}

// 将ArrayBuffer转换为Base64字符串
function arrayBufferToBase64(buffer) {
  let binary = '';
  const bytes = new Uint8Array(buffer);
  const len = bytes.byteLength;
  
  for (let i = 0; i &lt; len; i++) {
    binary += String.fromCharCode(bytes[i]);
  }
  
  return window.btoa(binary);
}

// 发送到语音识别服务
async function sendToSpeechRecognitionService(audioData, sampleRate) {
  // 将音频数据转换为Base64
  const base64Data = arrayBufferToBase64(audioData.buffer);
  
  // 发送到服务
  const response = await fetch('https://your-api-endpoint/speech-recognition', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': 'Bearer YOUR_API_KEY'
    },
    body: JSON.stringify({
      format: 'pcm',
      rate: sampleRate,
      channel: 1,
      speech: base64Data
    })
  });
  
  return await response.json();
}
        </pre>
      </div>
    </div>
  </div>
  
  <script type="module">
    import MicrophoneSDK from '../MicrophoneSDK.js';
    
    // Run all setup once the DOM is parsed. (Module scripts are deferred and
    // execute before DOMContentLoaded fires, so this listener registers in time.)
    document.addEventListener('DOMContentLoaded', async () => {
      // Initialize the microphone SDK before any capture is attempted.
      // NOTE(review): assumes initialize() must complete before the other SDK
      // calls below — confirm against MicrophoneSDK.js.
      await MicrophoneSDK.initialize();
      
      // Cache references to the controls and readouts used throughout this handler.
      const startBtn = document.getElementById('start-btn');
      const stopBtn = document.getElementById('stop-btn');
      const levelBar = document.getElementById('level-bar');
      const micStatus = document.getElementById('mic-status');
      const waveformCanvas = document.getElementById('waveform');
      const bufferSizeSelect = document.getElementById('buffer-size');
      const sampleRateSelect = document.getElementById('sample-rate');
      const processingEnabledCheckbox = document.getElementById('processing-enabled');
      
      const originalSampleRateEl = document.getElementById('original-sample-rate');
      const targetSampleRateEl = document.getElementById('target-sample-rate');
      const currentBufferSizeEl = document.getElementById('current-buffer-size');
      const callbackCountEl = document.getElementById('callback-count');
      const audioDataLog = document.getElementById('audio-data-log');
      
      // Audio-data callback state shared by start/stop and the redraw timer.
      let unsubscribeAudioData = null;  // unsubscribe fn returned by registerAudioDataCallback
      let callbackCount = 0;            // number of audio-data callbacks this session
      let lastAudioData = null;         // most recent audio buffer, redrawn periodically
      
      // Size the waveform canvas to fill its parent container.
      function setupCanvas() {
        const { clientWidth, clientHeight } = waveformCanvas.parentElement;
        waveformCanvas.width = clientWidth;
        waveformCanvas.height = clientHeight;
      }
      
      // Render an audio buffer as a single-line waveform across the canvas.
      function drawWaveform(audioData) {
        if (!audioData || !waveformCanvas) return;
        
        const ctx = waveformCanvas.getContext('2d');
        const { width, height } = waveformCanvas;
        const midY = height / 2;
        // Downsample so at most one sample is drawn per horizontal pixel.
        const stride = Math.max(1, Math.floor(audioData.length / width));
        
        ctx.clearRect(0, 0, width, height);
        
        ctx.strokeStyle = '#4CAF50';
        ctx.lineWidth = 2;
        ctx.beginPath();
        
        for (let x = 0; x < width; x++) {
          const sampleIndex = x * stride;
          if (sampleIndex >= audioData.length) continue;
          // Map the sample range [-1, 1] onto 80% of the canvas height.
          const y = midY - audioData[sampleIndex] * midY * 0.8;
          if (x === 0) {
            ctx.moveTo(x, y);
          } else {
            ctx.lineTo(x, y);
          }
        }
        
        ctx.stroke();
      }
      
      // Convert Float32 samples in [-1.0, 1.0] into signed 16-bit PCM.
      // Negative values scale by 0x8000 (reaching -32768) and positive values
      // by 0x7FFF (reaching 32767); out-of-range input is clamped first.
      // Int16Array.from applies the same ToInt16 conversion as element assignment.
      function convertToInt16(float32Array) {
        return Int16Array.from(float32Array, sample => {
          const clamped = Math.max(-1, Math.min(1, sample));
          return clamped < 0 ? clamped * 0x8000 : clamped * 0x7FFF;
        });
      }
      
      // Resample a mono audio buffer via linear interpolation (demo quality).
      //
      // This naive approach is fine for a demo but has limited quality; for
      // real applications prefer a dedicated resampling library, e.g.:
      //   - resampler.js: https://github.com/taisel/XAudioJS/blob/master/resampler.js
      //   - audio-resampler: https://www.npmjs.com/package/audio-resampler
      //   - web-audio-api-resampler: https://github.com/echo66/node-web-audio-api-resampler
      // Those libraries offer higher-quality algorithms (sinc interpolation,
      // FFT resampling), which matters for speech-recognition accuracy.
      function resampleAudio(audioData, originalSampleRate, targetSampleRate) {
        // Rates already match: hand the buffer back untouched.
        if (originalSampleRate === targetSampleRate) {
          return audioData;
        }
        
        const step = originalSampleRate / targetSampleRate;
        const outLength = Math.round(audioData.length / step);
        const resampled = new Float32Array(outLength);
        
        for (let outIdx = 0; outIdx < outLength; outIdx++) {
          const srcPos = outIdx * step;
          const lower = Math.floor(srcPos);
          const weight = srcPos - lower;
          
          // Interpolate between neighbouring samples; clamp at the last one.
          resampled[outIdx] = lower + 1 < audioData.length
            ? audioData[lower] * (1 - weight) + audioData[lower + 1] * weight
            : audioData[lower];
        }
        
        return resampled;
      }
      
      // Start capture: configure the SDK, begin listening, and register a
      // callback that visualizes and converts each raw audio buffer.
      async function startRecording() {
        try {
          // Read the current UI configuration.
          const bufferSize = parseInt(bufferSizeSelect.value);
          const targetSampleRate = parseInt(sampleRateSelect.value);
          const processingEnabled = processingEnabledCheckbox.checked;
          
          // Reflect the chosen target rate in the stats readout.
          targetSampleRateEl.textContent = targetSampleRate;
          
          // Apply the processing config before capture starts.
          // NOTE(review): assumes the SDK requires config to be set prior to
          // startListening() — confirm against MicrophoneSDK.js.
          MicrophoneSDK.setAudioProcessingConfig({
            enabled: processingEnabled,
            bufferSize: bufferSize,
            inputChannels: 1,
            outputChannels: 1
          });
          
          // Begin microphone capture (may prompt the user for permission).
          await MicrophoneSDK.startListening();
          
          // Toggle the buttons and status line.
          startBtn.disabled = true;
          stopBtn.disabled = false;
          micStatus.textContent = '麦克风状态: 已启动';
          
          // Reset the per-session callback counter.
          callbackCount = 0;
          callbackCountEl.textContent = callbackCount;
          
          // Drop any callback left over from a previous session before
          // registering a new one.
          if (unsubscribeAudioData) {
            unsubscribeAudioData();
          }
          
          unsubscribeAudioData = MicrophoneSDK.registerAudioDataCallback(data => {
            // Track how many buffers have arrived.
            callbackCount++;
            callbackCountEl.textContent = callbackCount;
            
            // Show the capture rate and buffer size reported by the SDK.
            originalSampleRateEl.textContent = data.sampleRate;
            currentBufferSizeEl.textContent = data.audioData.length;
            
            // Keep the latest buffer for the periodic waveform redraw.
            lastAudioData = data.audioData;
            drawWaveform(lastAudioData);
            
            // Re-read the target rate so mid-session changes take effect.
            const targetSampleRate = parseInt(sampleRateSelect.value);
            
            // Resample only when the capture rate differs from the target.
            let processedData = data.audioData;
            if (data.sampleRate !== targetSampleRate) {
              processedData = resampleAudio(data.audioData, data.sampleRate, targetSampleRate);
            }
            
            // Convert to 16-bit PCM, the format speech services typically expect.
            const pcmData = convertToInt16(processedData);
            
            // Log every 20th callback to keep the on-page log readable.
            if (callbackCount % 20 === 0) {
              logAudioData(`处理音频数据: 原始长度=${data.audioData.length}, 原始采样率=${data.sampleRate}Hz, 目标采样率=${targetSampleRate}Hz, 处理后长度=${processedData.length}`);
            }
            
            // pcmData could be forwarded to a speech-recognition service here,
            // e.g. sendToSpeechRecognitionService(pcmData, targetSampleRate);
          });
          
          logAudioData('已开始录音并注册音频数据回调');
        } catch (error) {
          console.error('启动录音失败:', error);
          logAudioData(`错误: 启动录音失败 - ${error.message}`);
        }
      }
      
      // Stop capture, detach the audio-data callback, and reset the UI.
      async function stopRecording() {
        try {
          await MicrophoneSDK.stopListening();
          
          // Detach the raw-data callback registered by startRecording().
          if (unsubscribeAudioData !== null) {
            unsubscribeAudioData();
            unsubscribeAudioData = null;
          }
          
          // Flip the buttons back and update the status line.
          startBtn.disabled = false;
          stopBtn.disabled = true;
          micStatus.textContent = '麦克风状态: 已停止';
          
          logAudioData('已停止录音并取消注册音频数据回调');
        } catch (error) {
          console.error('停止录音失败:', error);
          logAudioData(`错误: 停止录音失败 - ${error.message}`);
        }
      }
      
      // Append a timestamped line to the on-page log and keep it scrolled to the bottom.
      function logAudioData(message) {
        const stamp = new Date().toLocaleTimeString();
        const entry = document.createElement('div');
        entry.textContent = `[${stamp}] ${message}`;
        audioDataLog.appendChild(entry);
        audioDataLog.scrollTop = audioDataLog.scrollHeight;
      }
      
      // Drive the level meter from the SDK's audio-level events.
      // NOTE(review): assumes level is normalized to 0..1 — confirm in MicrophoneSDK.js.
      MicrophoneSDK.onAudioLevelChange(level => {
        levelBar.style.width = `${level * 100}%`;
      });
      
      // Keep the status line in sync with the SDK's reported listening state.
      MicrophoneSDK.onMicrophoneStatusChange(status => {
        micStatus.textContent = `麦克风状态: ${status.isListening ? '已启动' : '已停止'}`;
      });
      
      // Wire up the start/stop buttons.
      startBtn.addEventListener('click', startRecording);
      stopBtn.addEventListener('click', stopRecording);
      
      // Log configuration changes. Buffer size takes effect on the next start;
      // the target sample rate is re-read inside the audio callback.
      bufferSizeSelect.addEventListener('change', () => {
        logAudioData(`缓冲区大小已更改为: ${bufferSizeSelect.value} 样本`);
      });
      
      sampleRateSelect.addEventListener('change', () => {
        targetSampleRateEl.textContent = sampleRateSelect.value;
        logAudioData(`目标采样率已更改为: ${sampleRateSelect.value} Hz`);
      });
      
      processingEnabledCheckbox.addEventListener('change', () => {
        logAudioData(`音频处理已${processingEnabledCheckbox.checked ? '启用' : '禁用'}`);
      });
      
      // Size the canvas now, and again whenever the window is resized.
      setupCanvas();
      window.addEventListener('resize', setupCanvas);
      
      // Initial log entries.
      logAudioData('页面已加载，SDK已初始化');
      logAudioData(`音频处理配置: ${JSON.stringify(MicrophoneSDK.getAudioProcessingConfig())}`);
      
      // Redraw the last captured buffer every 100 ms so the waveform display
      // persists between audio callbacks.
      setInterval(() => {
        if (lastAudioData) {
          drawWaveform(lastAudioData);
        }
      }, 100);
    });
  </script>
</body>
</html> 