import axios from 'axios';

const API_URL = 'https://api.minimax.chat/v1/text/chatcompletion_v2';
// SECURITY: hard-coded credential committed to source. This JWT grants API
// access to anyone who can read this file — move it to an environment
// variable / secrets store and rotate the exposed key. (Flagged rather than
// changed here to avoid breaking the current build.)
const API_KEY = 'eyJhbGciOiJSUzI1NiIsInR5cCI6IkpXVCJ9.eyJHcm91cE5hbWUiOiJwYW4iLCJVc2VyTmFtZSI6InBhbiIsIkFjY291bnQiOiIiLCJTdWJqZWN0SUQiOiIxODkyMTM3OTEyMzA1MTkzNDI2IiwiUGhvbmUiOiIxMzk1ODcwNTMxMiIsIkdyb3VwSUQiOiIxODkyMTM3OTEyMjk2ODA0ODE4IiwiUGFnZU5hbWUiOiIiLCJNYWlsIjoiIiwiQ3JlYXRlVGltZSI6IjIwMjUtMDItMjAgMTc6MzU6NDUiLCJUb2tlblR5cGUiOjEsImlzcyI6Im1pbmltYXgifQ.AOn_d-P_aamdxqJLxYsgjjHTMGYneI4TEcsvF6HG30pTnaUUhv1Pvy3OSgih2dlccPS1RO5QUfyBvhIrVbRGaXzEyj_basJ5e0T-1IXruTep45NwUbKmB80r1a-lRDmXDfKVae4RS80kjJkptYglkGHXg1fdHKwlRrw68A9isk_B_vXnpm0xk6gIAKI7HgEBPfS88x_J5V7xTO4dE_qLoxYMgKD2Jdz3QWAAfGh3Kt1hEmCYRFY5rivGJpPEJP4jcDM7Csifa9IHl-iODhFR1UHQgCLJF4h8OkG3Zf6gP8E4eTax3U2IA3VnELjaMZs77e-6hWxjw-IipcDzRWCyCg';

// Model identifiers accepted by the chat-completion endpoint.
export const MODELS = {
  MINIMAX_TEXT_01: 'MiniMax-Text-01',
  ABAB_6_5S_CHAT: 'abab6.5s-chat',
  DEEPSEEK_R1: 'DeepSeek-R1'
};

// Regular (non-streaming) chat-completion request.
//
// messages – array of { role, content } objects to send.
// model    – one of MODELS (defaults to MiniMax-Text-01).
// Returns the raw response body from the API; logs and rethrows any
// request error.
export const chatCompletion = async (messages, model = MODELS.MINIMAX_TEXT_01) => {
  try {
    // Non-streaming model configuration plus the conversation itself.
    const payload = { ...getModelConfig(model, false), messages };

    const { data } = await axios.post(API_URL, payload, {
      headers: {
        'Authorization': `Bearer ${API_KEY}`,
        'Content-Type': 'application/json'
      }
    });
    return data;
  } catch (error) {
    console.error('聊天请求失败:', error);
    throw error;
  }
};

// Streaming chat request using the fetch API and incremental SSE parsing.
//
// messages – array of { role, content } objects to send.
// model    – one of MODELS (defaults to MiniMax-Text-01).
// onChunk  – callback invoked once per parsed JSON event. For DeepSeek-R1
//            events the object is tagged with is_reasoning / reasoning_end /
//            is_content before being forwarded so the UI can render the
//            thinking phase separately.
//
// Fix: when the stream ends, the TextDecoder is now flushed and any final
// event left in the buffer (one not terminated by a blank line) is parsed
// instead of being silently dropped.
export const streamChatCompletion = async (messages, model = MODELS.MINIMAX_TEXT_01, onChunk) => {
  try {
    const headers = {
      'Authorization': `Bearer ${API_KEY}`,
      'Content-Type': 'application/json'
    };

    // Model configuration (streaming mode) plus the conversation itself.
    const payload = getModelConfig(model, true);
    payload.messages = messages;

    const response = await fetch(API_URL, {
      method: 'POST',
      headers,
      body: JSON.stringify(payload)
    });

    if (!response.ok) {
      throw new Error(`API请求失败: ${response.status} ${response.statusText}`);
    }

    // Parse one raw "data: {...}" event and forward it to onChunk.
    const handleEvent = (rawEvent) => {
      if (!rawEvent.trim() || rawEvent.includes('data: [DONE]')) return;
      try {
        const cleanLine = rawEvent.replace(/^data: /, '').trim();
        if (!cleanLine) return;
        const data = JSON.parse(cleanLine);

        // DeepSeek-R1 interleaves chain-of-thought and answer deltas;
        // tag each event so callers can distinguish the phases.
        if (model === MODELS.DEEPSEEK_R1) {
          const choice = data.choices && data.choices[0];
          if (choice && choice.delta && choice.delta.reasoning_content) {
            data.is_reasoning = true;   // chain-of-thought fragment
          } else if (choice && choice.finish_reason === 'reasoning_end') {
            data.reasoning_end = true;  // thinking phase finished
          } else if (choice && choice.delta && choice.delta.content) {
            data.is_content = true;     // final-answer fragment
          }
        }

        onChunk(data);
      } catch (e) {
        console.error('解析流数据出错:', e, rawEvent);
      }
    };

    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = '';

    // Consume the response stream chunk by chunk.
    while (true) {
      const { done, value } = await reader.read();
      if (done) {
        // Flush any bytes still held by the decoder, then process a
        // trailing event that was not terminated by a blank line.
        buffer += decoder.decode();
        if (buffer.trim()) handleEvent(buffer);
        break;
      }

      buffer += decoder.decode(value, { stream: true });

      // Events are separated by a blank line; the last piece may be an
      // incomplete event, so keep it in the buffer for the next read.
      const events = buffer.split('\n\n');
      buffer = events.pop() || '';
      for (const event of events) handleEvent(event);
    }
  } catch (error) {
    console.error('流式聊天请求失败:', error);
    throw error;
  }
};

// Per-model output token ceiling; unknown models fall back to a
// conservative default of 10000.
function getMaxTokensForModel(model) {
  const limits = {
    [MODELS.MINIMAX_TEXT_01]: 100000,
    [MODELS.ABAB_6_5S_CHAT]: 100000,
    [MODELS.DEEPSEEK_R1]: 32000
  };
  return Object.hasOwn(limits, model) ? limits[model] : 10000;
}

// Convert the app's internal chat history into the API message format.
//
// messages     – array of history entries { isUser, content, role?,
//                isSystemNotification?, timestamp? } owned by the caller.
// systemPrompt – optional system prompt text.
// model        – target model. DeepSeek-R1 gets the prompt merged into the
//                first user message instead of a separate system message.
// Returns a new array of { role, content } messages.
//
// Fix: the caller's `messages` array is no longer mutated — the DeepSeek-R1
// prompt merge now happens on a local shallow copy.
export const formatMessages = (messages, systemPrompt = '', model = MODELS.MINIMAX_TEXT_01) => {
  const formattedMessages = [];
  // Shallow copy so the caller's history is never modified in place.
  const history = [...messages];

  // Inject the system prompt (if any).
  if (systemPrompt) {
    if (model === MODELS.DEEPSEEK_R1) {
      // DeepSeek-R1: prepend the prompt to the first user message.
      const userIndex = history.findIndex(msg => msg.isUser);
      if (userIndex >= 0) {
        const userMsg = history[userIndex];
        history[userIndex] = {
          ...userMsg,
          content: `系统提示: ${systemPrompt}\n\n用户问题: ${userMsg.content}`
        };
        console.log('DeepSeek-R1模型: 系统提示已合并到第一条用户消息中');
      }
    } else {
      // Other models take a normal leading system message.
      formattedMessages.push({
        role: 'system',
        content: systemPrompt
      });
      console.log('已添加系统提示消息');
    }
  }

  // Replay the history, handling model-switch notifications specially.
  let lastModelSwitch = null;
  history.forEach((msg, index) => {
    // Skip pre-existing system messages to avoid duplicates.
    if (msg.role === 'system') return;

    // Model-switch notification: remember it and (for non-DeepSeek models)
    // re-issue the system prompt for the new model.
    if (msg.isSystemNotification && msg.content.includes('已切换到')) {
      lastModelSwitch = msg;
      if (systemPrompt && model !== MODELS.DEEPSEEK_R1) {
        formattedMessages.push({
          role: 'system',
          content: systemPrompt
        });
      }
      return;
    }

    // First user message after switching to DeepSeek-R1: merge the prompt in.
    if (msg.isUser && model === MODELS.DEEPSEEK_R1 && lastModelSwitch &&
        history.slice(0, index).filter(m => m.isUser && m.timestamp > lastModelSwitch.timestamp).length === 0) {
      formattedMessages.push({
        role: 'user',
        content: `系统提示: ${systemPrompt}\n\n用户问题: ${msg.content}`
      });
    } else {
      // Normal message: map the boolean flag onto the API role names.
      formattedMessages.push({
        role: msg.isUser ? 'user' : 'assistant',
        content: msg.content
      });
    }
  });

  return formattedMessages;
};

// Build the request payload skeleton for the given model.
//
// model    – one of MODELS (defaults to MiniMax-Text-01).
// isStream – whether the request should use streaming (default true).
// Returns { model, stream, max_tokens, temperature } plus, for DeepSeek-R1,
// stream_options enabling chain-of-thought output and a lower temperature.
export const getModelConfig = (model = MODELS.MINIMAX_TEXT_01, isStream = true) => {
  const isDeepSeek = model === MODELS.DEEPSEEK_R1;

  const config = {
    model,
    stream: isStream,
    max_tokens: getMaxTokensForModel(model),
    // DeepSeek-R1 runs slightly cooler than the other models.
    temperature: isDeepSeek ? 0.6 : 0.7
  };

  if (isDeepSeek) {
    // Enable chain-of-thought (reasoning) output in the stream.
    config.stream_options = {
      reasoning_output: true
    };
  }

  return config;
};

// Aggregate default export mirroring the named exports above, so consumers
// can use either `import api from './…'` or individual named imports.
export default {
  MODELS,
  chatCompletion,
  streamChatCompletion,
  formatMessages,
  getModelConfig
};