import axios from 'axios';

// Configure these to match the actual backend / Ollama API addresses;
// overridable via NEXT_PUBLIC_* environment variables at build time.
const API_BASE_URL = process.env.NEXT_PUBLIC_API_BASE_URL || 'http://localhost:3001/api';
const OLLAMA_API_URL = process.env.NEXT_PUBLIC_OLLAMA_API_URL || 'http://localhost:11434/api';

/**
 * Sends recorded audio to the backend speech-to-text endpoint.
 *
 * @param audioBlob - The recorded audio data to transcribe.
 * @returns The recognized text returned by the backend.
 * @throws Error when the request fails (original failure is logged).
 */
export const sendVoiceForRecognition = async (audioBlob: Blob): Promise<string> => {
  try {
    const formData = new FormData();
    formData.append('audio', audioBlob);

    // Do NOT set Content-Type manually here: axios / the browser must
    // generate the multipart boundary. A hand-written
    // 'multipart/form-data' header has no boundary parameter and many
    // servers reject such requests.
    const response = await axios.post<{ text: string }>(
      `${API_BASE_URL}/speech-to-text`,
      formData,
    );

    return response.data.text;
  } catch (error) {
    console.error('语音识别失败:', error);
    throw new Error('语音识别请求失败');
  }
};

/**
 * Requests a text-to-speech conversion from the backend.
 *
 * @param text - The text to synthesize.
 * @returns A URL pointing to the generated audio.
 * @throws Error when the request fails (original failure is logged).
 */
export const sendTextForSpeech = async (text: string): Promise<string> => {
  try {
    const endpoint = `${API_BASE_URL}/text-to-speech`;
    const { data } = await axios.post(endpoint, { text });
    return data.audioUrl;
  } catch (err) {
    console.error('文本转语音失败:', err);
    throw new Error('文本转语音请求失败');
  }
};

/**
 * Talks to a local Ollama instance directly via its /generate endpoint.
 *
 * @param message - The prompt to send to the model.
 * @param model - The Ollama model name (defaults to 'llama3').
 * @returns The model's generated response text.
 * @throws Error when the request fails (original failure is logged).
 */
export const sendMessageToOllama = async (message: string, model: string = 'llama3'): Promise<string> => {
  // Ask for the complete response in one payload rather than a token stream.
  const payload = {
    model,
    prompt: message,
    stream: false,
  };

  try {
    const { data } = await axios.post(`${OLLAMA_API_URL}/generate`, payload);
    return data.response;
  } catch (err) {
    console.error('Ollama API请求失败:', err);
    throw new Error('Ollama API请求失败');
  }
};

/**
 * Mock speech-recognition API for development/testing.
 *
 * @param audioBlob - Ignored; present to mirror the real API's signature.
 * @param delayMs - Simulated network latency in milliseconds (default 1000,
 *   matching the previous hard-coded delay; pass 0 in tests).
 * @returns A fixed placeholder transcription string.
 */
export const mockSpeechRecognition = async (audioBlob: Blob, delayMs: number = 1000): Promise<string> => {
  // Simulate API latency.
  await new Promise(resolve => setTimeout(resolve, delayMs));
  return '这是一个模拟的语音识别结果，实际开发中请替换为真实API调用。';
};

/**
 * Mock text-to-speech API for development/testing.
 *
 * @param text - Ignored; present to mirror the real API's signature.
 * @param delayMs - Simulated network latency in milliseconds (default 1000,
 *   matching the previous hard-coded delay; pass 0 in tests).
 * @returns A fixed sample audio URL; replace with the real API's URL in production.
 */
export const mockTextToSpeech = async (text: string, delayMs: number = 1000): Promise<string> => {
  // Simulate API latency.
  await new Promise(resolve => setTimeout(resolve, delayMs));
  return 'https://example.com/audio/sample.mp3';
};