// controllers/unifiedAsrController.js
// 统一语音识别控制器

const axios = require('axios');
const FormData = require('form-data');
const { preprocessAudio } = require('./audioPreprocessController');
const { transcribeWithAliyunASR } = require('./aliyunAsrController');

// Recognition engine selection, configurable via the ASR_ENGINE env var.
// Only 'whisper' and 'aliyun' are dispatched below; any other value falls
// through to the whisper default in unifiedTranscribe's switch.
const ASR_ENGINE = process.env.ASR_ENGINE || 'whisper'; // whisper, aliyun, google, etc.

/**
 * Transcribe audio using the local Python Whisper HTTP service.
 *
 * The service endpoint and request timeout are configurable via the
 * WHISPER_API_URL and WHISPER_TIMEOUT_MS environment variables; the
 * defaults preserve the previous hard-coded behavior
 * (http://localhost:5001/transcribe, 30s).
 *
 * @param {Buffer} audioBuffer - Raw audio data (webm container expected by the preprocessor).
 * @returns {Promise<*>} The `result` field of the Whisper service response.
 * @throws {Error} When preprocessing or the Whisper API call fails.
 */
async function transcribeWithWhisper(audioBuffer) {
  try {
    // Normalize/convert the incoming webm audio to WAV for the Whisper service.
    const processedAudioBuffer = await preprocessAudio(audioBuffer, '.webm');

    // Build a multipart/form-data payload carrying the processed audio.
    const form = new FormData();
    form.append('audio', processedAudioBuffer, {
      filename: 'audio.wav',
      contentType: 'audio/wav'
    });

    // Endpoint and timeout come from env vars with backward-compatible defaults.
    const whisperUrl = process.env.WHISPER_API_URL || 'http://localhost:5001/transcribe';
    const timeoutMs = Number.parseInt(process.env.WHISPER_TIMEOUT_MS, 10) || 30000;

    // Call the Whisper API.
    const response = await axios.post(whisperUrl, form, {
      headers: {
        ...form.getHeaders(),
      },
      timeout: timeoutMs
    });

    return response.data.result;
  } catch (error) {
    console.error('Whisper转录失败:', error.response ? error.response.data : error.message);
    throw new Error('Whisper转录失败: ' + (error.response?.data?.error || error.message));
  }
}

/**
 * Transcribe audio via the Aliyun ASR service, falling back to the local
 * Whisper service if the Aliyun call fails for any reason.
 *
 * @param {Buffer} audioBuffer - Raw audio data (webm container expected by the preprocessor).
 * @returns {Promise<*>} Transcription result from Aliyun, or from Whisper on fallback.
 */
async function transcribeWithAliyun(audioBuffer) {
  try {
    // Normalize the webm audio to WAV, then hand it to Aliyun ASR.
    const wavBuffer = await preprocessAudio(audioBuffer, '.webm');
    return await transcribeWithAliyunASR(wavBuffer, 'wav');
  } catch (error) {
    console.error('阿里云ASR调用失败:', error.message);
    console.log('阿里云ASR失败，回退到Whisper');
    // Best-effort fallback: retry with the original (unprocessed) buffer,
    // since transcribeWithWhisper performs its own preprocessing.
    return await transcribeWithWhisper(audioBuffer);
  }
}

/**
 * 统一转录接口
 */
exports.unifiedTranscribe = async (req, res) => {
  if (!req.file) {
    return res.status(400).json({ msg: 'No audio file uploaded.' });
  }

  try {
    let result;
    
    // 根据配置选择识别引擎
    switch (ASR_ENGINE.toLowerCase()) {
      case 'aliyun':
        result = await transcribeWithAliyun(req.file.buffer);
        break;
      case 'whisper':
      default:
        result = await transcribeWithWhisper(req.file.buffer);
        break;
    }
    
    res.json({ result, engine: ASR_ENGINE });
  } catch (error) {
    console.error('语音识别错误:', error);
    res.status(500).send('语音识别过程中出现错误: ' + error.message);
  }
};

// Expose the internal engine functions so unit tests can call them directly.
Object.assign(module.exports, {
  transcribeWithWhisper,
  transcribeWithAliyun,
});