/**
 * 语音识别集成测试
 * 测试MicrophoneSDK与语音识别服务的集成
 */

import { MicrophoneSDK } from '../../src/MicrophoneSDK';

// 模拟Web Speech API
// Mock implementation of the Web Speech API SpeechRecognition interface.
// Simulates one final recognition result ~1s after start(), followed by an
// end event ~500ms later, mirroring the real API's event-driven surface.
class MockSpeechRecognition {
  constructor() {
    // Standard configuration properties of the SpeechRecognition API.
    this.continuous = false;
    this.interimResults = false;
    this.lang = 'zh-CN';
    this.maxAlternatives = 1;
    this.grammars = null;
    // Event handler slots, null until the consumer assigns them.
    this.onstart = null;
    this.onresult = null;
    this.onerror = null;
    this.onend = null;
    // Tracks whether recognition is currently active.
    this.isStarted = false;
  }

  start() {
    this.isStarted = true;
    this.onstart?.();

    // Deliver a single final recognition result after a 1s delay,
    // unless recognition was stopped in the meantime.
    setTimeout(() => {
      if (this.onresult && this.isStarted) {
        this.onresult({
          resultIndex: 0,
          results: [
            {
              isFinal: true,
              0: {
                transcript: '这是一个测试结果',
                confidence: 0.9,
              },
              length: 1,
            },
          ],
        });
      }

      // Then signal the end of recognition 500ms later.
      setTimeout(() => {
        if (this.onend && this.isStarted) {
          this.isStarted = false;
          this.onend();
        }
      }, 500);
    }, 1000);
  }

  stop() {
    // Stopping is a no-op when recognition is not active.
    if (!this.isStarted) return;
    this.isStarted = false;
    this.onend?.();
  }

  abort() {
    // The mock treats abort identically to stop.
    this.stop();
  }
}

// Install the mock under both the standard and webkit-prefixed names.
// `globalThis` resolves to the global object in every environment (Node,
// jsdom, browsers), unlike the Node-specific `global` — the tests below
// read these constructors via `window.*`, so the portable form is needed.
globalThis.SpeechRecognition = MockSpeechRecognition;
globalThis.webkitSpeechRecognition = MockSpeechRecognition;

// Integration suite: MicrophoneSDK working alongside a (mocked) Web Speech API.
describe('语音识别集成测试', () => {
  let sdk;
  let recognition;
  
  beforeEach(() => {
    // Fresh SDK instance per test
    sdk = new MicrophoneSDK();
    
    // Fresh speech recognition instance per test.
    // NOTE(review): reading from `window` assumes jest runs with a
    // jsdom-like environment — confirm testEnvironment is 'jsdom'.
    recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
  });
  
  afterEach(() => {
    // Stop speech recognition if still active
    if (recognition && recognition.isStarted) {
      recognition.stop();
    }
    
    // Release SDK resources
    if (sdk) {
      sdk.dispose();
      sdk = null;
    }
  });
  
  test('SDK初始化并获取麦克风流', async () => {
    // Initialize the SDK
    const initResult = await sdk.initialize();
    expect(initResult).toBe(true);
    
    // Start listening
    const startResult = await sdk.startListening();
    expect(startResult).toBe(true);
    
    // The microphone stream should now be available
    const stream = sdk.getMicrophoneStream();
    expect(stream).not.toBeNull();
  });
  
  test('SDK与语音识别集成', async () => {
    // Initialize the SDK and start capturing audio
    await sdk.initialize();
    await sdk.startListening();
    
    // Configure recognition parameters
    recognition.continuous = false;
    recognition.interimResults = true;
    recognition.lang = 'zh-CN';
    
    // Register the result handler BEFORE start(): the mock fires onresult
    // on a timer, so the handler must already be in place.
    const recognitionResult = new Promise((resolve) => {
      recognition.onresult = (event) => {
        const result = event.results[event.resultIndex][0].transcript;
        resolve(result);
      };
    });
    
    // Begin recognition (mock delivers a result after ~1s)
    recognition.start();
    
    // Await the recognized transcript
    const result = await recognitionResult;
    expect(result).toBe('这是一个测试结果');
  });
  
  test('注册音频数据回调并处理数据', async () => {
    // Initialize the SDK
    await sdk.initialize();
    
    // Resolve with the first audio chunk delivered to the callback
    const audioDataPromise = new Promise((resolve) => {
      // Register the audio-data callback before listening starts
      sdk.registerAudioDataCallback((audioData) => {
        resolve(audioData);
      });
    });
    
    // Start listening to trigger audio processing
    await sdk.startListening();
    
    // Verify the audio chunk shape: channel data, sample rate, timestamp
    const audioData = await audioDataPromise;
    expect(audioData).toEqual(expect.objectContaining({
      channelData: expect.any(Array),
      sampleRate: expect.any(Number),
      timestamp: expect.any(Number)
    }));
  });
  
  test('设置音频处理配置以适应语音识别', async () => {
    // Initialize the SDK
    await sdk.initialize();
    
    // Configuration suited to speech recognition
    const config = {
      enableProcessing: true,
      sampleRate: 16000, // most speech-recognition services use 16kHz
      bufferSize: 4096,
      channelCount: 1 // mono is preferable for speech recognition
    };
    
    sdk.setAudioProcessingConfig(config);
    
    // Read the config back and verify it round-trips
    const currentConfig = sdk.getAudioProcessingConfig();
    expect(currentConfig).toEqual(expect.objectContaining({
      enableProcessing: true,
      sampleRate: 16000,
      bufferSize: 4096,
      channelCount: 1
    }));
  });
  
  test('模拟实时语音识别处理', async () => {
    // Initialize the SDK
    await sdk.initialize();
    
    // Configure audio processing for speech recognition
    sdk.setAudioProcessingConfig({
      enableProcessing: true,
      sampleRate: 16000,
      bufferSize: 4096,
      channelCount: 1
    });
    
    // Stand-in for a real-time recognition processing function
    const processAudio = jest.fn();
    
    // Register it as the audio-data callback
    sdk.registerAudioDataCallback(processAudio);
    
    // Start listening
    await sdk.startListening();
    
    // Give the audio pipeline time to invoke the callback.
    // NOTE(review): a fixed 500ms wait can be flaky — consider polling
    // or fake timers if this test proves unstable.
    await new Promise(resolve => setTimeout(resolve, 500));
    
    // The processing function should have received audio data
    expect(processAudio).toHaveBeenCalled();
    
    // Stop listening
    await sdk.stopListening();
  });
  
  test('处理语音识别错误', async () => {
    // Initialize the SDK and start capturing audio
    await sdk.initialize();
    await sdk.startListening();
    
    // Resolve when the error handler is invoked
    const errorPromise = new Promise((resolve) => {
      recognition.onerror = (event) => {
        resolve(event);
      };
      
      // Manually fire a 'no-speech' error after 500ms (the mock's start()
      // never raises errors on its own)
      setTimeout(() => {
        if (recognition.onerror) {
          recognition.onerror({ error: 'no-speech', message: 'No speech detected' });
        }
      }, 500);
    });
    
    // Begin recognition
    recognition.start();
    
    // Await and verify the error event
    const error = await errorPromise;
    expect(error).toEqual(expect.objectContaining({
      error: 'no-speech'
    }));
  });
});