import axios from 'axios';

/**
 * 处理流式响应
 * @param response fetch API的响应对象
 * @param onProgress 进度回调函数
 * @returns 聊天响应
 */
/**
 * Consume a fetch-API streaming (SSE) response body and assemble the full
 * assistant message.
 *
 * Events arrive as `data: {json}` lines; a `data: [DONE]` sentinel marks the
 * end of the stream. A network read may end in the middle of a line, so any
 * incomplete trailing text is buffered and re-parsed once the rest arrives.
 * (The previous implementation split each raw chunk on '\n' directly, so a
 * JSON payload cut across two reads failed to parse and its content delta
 * was silently dropped.)
 *
 * NOTE(review): this helper is not referenced by `useAiApi` below — verify
 * whether it is still needed or should be wired into the streaming path.
 *
 * @param response fetch API Response whose body is an SSE stream
 * @param onProgress called with each content delta as it arrives
 * @returns the aggregated chat response; token usage is not reported in
 *          streaming mode, so `total_tokens` is 0
 */
async function handleStreamResponse(response: Response, onProgress: (chunk: string) => void): Promise<ChatResponse> {
  const reader = response.body!.getReader();
  const decoder = new TextDecoder('utf-8');
  const chunks: string[] = [];
  // Incomplete line carried over from the previous read.
  let buffer = '';

  // Parse one complete SSE line and forward its content delta.
  const processLine = (line: string): void => {
    if (!line.startsWith('data: ') || line.includes('[DONE]')) return;
    try {
      const jsonStr = line.substring(6); // strip the 'data: ' prefix
      const data = JSON.parse(jsonStr);
      if (data.choices?.[0]?.delta?.content) {
        const content = data.choices[0].delta.content;
        chunks.push(content);
        onProgress(content);
      }
    } catch (error) {
      console.error('解析流式数据时出错:', error);
    }
  };

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) {
        break;
      }

      // stream: true keeps a multi-byte UTF-8 sequence that straddles a
      // chunk boundary inside the decoder until its remaining bytes arrive.
      buffer += decoder.decode(value, { stream: true });

      const lines = buffer.split('\n');
      // The last element is either '' (chunk ended on '\n') or a partial
      // line — keep it buffered for the next read instead of parsing it.
      buffer = lines.pop() ?? '';
      for (const line of lines) {
        if (line.trim()) processLine(line);
      }
    }

    // Flush any bytes still held by the decoder, then handle a final line
    // that was not newline-terminated.
    buffer += decoder.decode();
    if (buffer.trim()) processLine(buffer);

    const fullContent = chunks.join('');
    return {
      message: {
        role: 'assistant',
        content: fullContent || '抱歉，我无法回答这个问题。'
      },
      usage: {
        total_tokens: 0 // streaming responses carry no usage information
      }
    };
  } catch (error) {
    console.error('处理流式响应时出错:', error);
    throw error;
  } finally {
    // Always release the reader so the underlying stream can be cancelled/GCed.
    reader.releaseLock();
  }
}

/** A single turn in a chat conversation, in OpenAI-compatible shape. */
interface ChatMessage {
  /** Who produced the message: the end user or the model. */
  role: 'user' | 'assistant';
  /** Plain-text body of the message. */
  content: string;
}

/** Normalized result returned by the chat-completion helpers in this file. */
interface ChatResponse {
  /** The assistant's reply message. */
  message: ChatMessage;
  /** Token accounting; 0 when the API provides no usage info (streaming). */
  usage: {
    total_tokens: number;
  };
}

/**
 * Composable wrapping the Aliyun DashScope OpenAI-compatible chat API.
 *
 * Exposes message sending (streaming and non-streaming), an API-key probe,
 * and access to the configured key.
 */
export function useAiApi() {
  // Base URL of the Aliyun OpenAI-compatible chat-completions endpoint.
  const baseUrl = 'https://dashscope.aliyuncs.com/compatible-mode/v1/chat/completions';

  // API key injected at build time by Vite.
  const apiKey = import.meta.env.VITE_QWEN_API_KEY || '';

  /**
   * Send one user message to the model.
   *
   * When `onProgress` is supplied the request is made in streaming (SSE)
   * mode and the callback receives each content delta as it arrives; the
   * resolved value then contains the concatenated text. Otherwise a single
   * JSON response is awaited.
   *
   * @param message user message text
   * @param onProgress optional per-delta callback; enables streaming mode
   * @returns the assistant reply plus token usage (0 in streaming mode,
   *          where the API reports no usage)
   * @throws rethrows any axios/network error after logging it
   */
  const sendChatMessage = async (message: string, onProgress?: (chunk: string) => void): Promise<ChatResponse> => {
    try {
      // Request body in OpenAI-compatible format.
      const requestBody = {
        model: 'qwen-turbo', // Qwen turbo model; change here if needed
        messages: [
          {
            role: 'user',
            content: message
          }
        ],
        temperature: 0.7,
        stream: !!onProgress
      };

      if (onProgress) {
        const chunks: string[] = [];
        // Index into xhr.responseText up to which complete lines have
        // already been parsed.
        let processedLength = 0;

        // Stream via axios: onDownloadProgress fires as responseText grows.
        // The result itself is unused — content is collected incrementally.
        await axios.post(baseUrl, requestBody, {
          headers: {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${apiKey}`
          },
          responseType: 'text',
          onDownloadProgress: (progressEvent) => {
            if (!progressEvent.event || !progressEvent.event.target) return;

            const xhr = progressEvent.event.target as XMLHttpRequest;
            const text = xhr.responseText;

            // Only consume up to the last newline: a trailing partial line
            // (an SSE event cut mid-chunk) is left for the next progress
            // event. The previous version diffed by raw length and split
            // immediately, so a payload straddling two events failed
            // JSON.parse and its content was dropped.
            const lastNewline = text.lastIndexOf('\n');
            if (lastNewline < processedLength) return;

            const completed = text.substring(processedLength, lastNewline);
            processedLength = lastNewline + 1;

            for (const line of completed.split('\n')) {
              if (line.trim() === '' || line.includes('[DONE]')) continue;
              if (!line.startsWith('data: ')) continue;
              try {
                const jsonStr = line.substring(6); // strip the 'data: ' prefix
                const data = JSON.parse(jsonStr);

                if (data.choices?.[0]?.delta?.content) {
                  const content = data.choices[0].delta.content;
                  chunks.push(content);
                  onProgress(content);
                }
              } catch (error) {
                console.error('解析流式数据时出错:', error);
              }
            }
          }
        });

        // Stream finished; assemble the full reply.
        const fullContent = chunks.join('');
        return {
          message: {
            role: 'assistant',
            content: fullContent || '抱歉，我无法回答这个问题。'
          },
          usage: {
            total_tokens: 0 // streaming responses carry no usage information
          }
        };
      } else {
        // Non-streaming: plain JSON request/response.
        const response = await axios.post(baseUrl, requestBody, {
          headers: {
            'Content-Type': 'application/json',
            'Authorization': `Bearer ${apiKey}`
          }
        });

        return {
          message: {
            role: 'assistant',
            content: response.data.choices[0].message.content
          },
          usage: {
            total_tokens: response.data.usage?.total_tokens || 0
          }
        };
      }
    } catch (error) {
      console.error('与AI模型通信时出错:', error);
      throw error;
    }
  };

  /**
   * Probe the configured API key with a minimal chat request.
   *
   * Note: axios rejects on non-2xx by default, so failures land in the
   * catch block and yield false rather than reaching the status check.
   *
   * @returns true when the endpoint accepted the key
   */
  const checkApiKey = async (): Promise<boolean> => {
    try {
      // Minimal test request (OpenAI-compatible format).
      const requestBody = {
        model: 'qwen-turbo',
        messages: [
          {
            role: 'user',
            content: '你好'
          }
        ],
        temperature: 0.7
      };

      const response = await axios.post(baseUrl, requestBody, {
        headers: {
          'Content-Type': 'application/json',
          'Authorization': `Bearer ${apiKey}`
        }
      });
      console.log('状态码：', response.status);
      return response.status === 200 || response.status === 204;
    } catch (error) {
      console.error('检查API密钥时出错:', error);
      return false;
    }
  };

  /**
   * @returns the API key currently read from the environment
   */
  const getApiKey = (): string => apiKey;

  return {
    sendChatMessage,
    checkApiKey,
    getApiKey
  };
}