import axios, { AxiosResponse } from 'axios';
import { AppConfig, Message, AIModelConfig, Interruptible, ToolCall, ToolResult } from '../types';
import { interruptController } from './interruptController';
import { toolExecutor } from './toolExecutor';

/**
 * Callback invoked with each cumulative streaming update from the model.
 *
 * @param chunk           Accumulated response text so far (handlers pass the
 *                        full content, not a delta; may be '' while thinking).
 * @param isThinking      True while the model is still in a reasoning phase.
 * @param thinkingContent Accumulated reasoning text, when available.
 * @param toolCalls       Tool calls parsed from the response, if any.
 * @param toolResults     Results of executing those tool calls.
 */
export interface StreamResponseHandler {
  (chunk: string, isThinking?: boolean, thinkingContent?: string, toolCalls?: ToolCall[], toolResults?: ToolResult[]): void;
}

class AIService implements Interruptible {
  // Abort controller for the in-flight HTTP request; null when idle.
  private abortController: AbortController | null = null;
  // Guards against registering with the interrupt controller twice.
  private isRegistered = false;

  constructor() {
    this.registerWithInterruptController();
  }

  /**
   * Register this service with the global interrupt controller so that a
   * user-triggered interrupt can cancel an in-flight request.
   */
  private registerWithInterruptController(): void {
    if (!this.isRegistered) {
      interruptController.addInterruptible(this);
      this.isRegistered = true;
    }
  }

  /**
   * Interruptible implementation: cancels the current request, if any.
   *
   * @param reason Optional human-readable reason, used only for logging.
   */
  async interrupt(reason?: string): Promise<void> {
    console.log(`AI服务中断: ${reason || '用户中断'}`);
    this.cancelCurrentRequest();
  }

  /**
   * Whether the global interrupt controller is currently interrupting.
   */
  isInterrupting(): boolean {
    return interruptController.isInterrupting();
  }

  /**
   * Send a user message to the configured main model and stream the reply
   * through `onStreamUpdate`.
   *
   * Any previous in-flight request is aborted first. Tool-usage instructions
   * (when available) are appended to the user message so the model knows how
   * to emit tool calls.
   *
   * @param userMessage    Raw user input.
   * @param history        Prior conversation; only messages with status
   *                       'sent' are forwarded to the model.
   * @param config         App configuration holding the model registry and
   *                       the name of the main model.
   * @param onStreamUpdate Callback receiving cumulative streaming updates.
   * @throws Error when the main model is not configured or its `model_type`
   *         is unsupported.
   */
  async sendMessage(
    userMessage: string,
    history: Message[],
    config: AppConfig,
    onStreamUpdate: StreamResponseHandler
  ): Promise<void> {
    const model = config.models[config.main_ai_model];
    if (!model) {
      throw new Error(`找不到模型配置: ${config.main_ai_model}`);
    }

    // Abort any previous in-flight request before starting a new one.
    if (this.abortController) {
      this.abortController.abort();
    }

    const controller = new AbortController();
    this.abortController = controller;

    try {
      // Prepend tool usage instructions so the model can emit tool calls.
      const toolInstructions = toolExecutor.generateToolUsageInstructions();
      const enhancedMessage = toolInstructions
        ? `${userMessage}\n\n${toolInstructions}`
        : userMessage;

      switch (model.model_type.toLowerCase()) {
        case 'openai':
          await this.sendOpenAIMessage(enhancedMessage, history, model, onStreamUpdate);
          break;
        case 'anthropic':
          await this.sendAnthropicMessage(enhancedMessage, history, model, onStreamUpdate);
          break;
        case 'custom':
          await this.sendCustomMessage(enhancedMessage, history, model, onStreamUpdate);
          break;
        case 'ollama':
          await this.sendOllamaMessage(enhancedMessage, history, model, onStreamUpdate);
          break;
        case 'localai':
          await this.sendLocalAIMessage(enhancedMessage, history, model, onStreamUpdate);
          break;
        case 'custom_local':
          await this.sendCustomLocalMessage(enhancedMessage, history, model, onStreamUpdate);
          break;
        default:
          throw new Error(`不支持的模型类型: ${model.model_type}`);
      }
    } finally {
      // Race fix: only clear the controller this call installed. A concurrent
      // sendMessage may already have replaced it with a newer one, and nulling
      // that out would make the newer request uncancellable.
      if (this.abortController === controller) {
        this.abortController = null;
      }
    }
  }

  /**
   * Send a request to an OpenAI-compatible chat-completions endpoint and
   * stream the SSE response.
   */
  private async sendOpenAIMessage(
    userMessage: string,
    history: Message[],
    model: AIModelConfig,
    onStreamUpdate: StreamResponseHandler
  ): Promise<void> {
    const messages = this.buildMessageHistory(history, userMessage);

    const requestBody = {
      model: model.model_name,
      messages: messages,
      stream: true,
      temperature: 0.7,
      max_tokens: 2000
    };

    const response = await axios.post(model.model_url, requestBody, {
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${model.model_token || ''}`,
      },
      responseType: 'stream',
      signal: this.abortController?.signal,
      timeout: 30000
    });

    await this.handleOpenAIStream(response, onStreamUpdate);
  }

  /**
   * Send a request to the Anthropic Messages API and stream the response.
   */
  private async sendAnthropicMessage(
    userMessage: string,
    history: Message[],
    model: AIModelConfig,
    onStreamUpdate: StreamResponseHandler
  ): Promise<void> {
    const messages = this.buildMessageHistory(history, userMessage);

    const requestBody = {
      model: model.model_name,
      messages: messages,
      stream: true,
      max_tokens: 2000
    };

    const response = await axios.post(model.model_url, requestBody, {
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': model.model_token || '',
        'anthropic-version': '2023-06-01'
      },
      responseType: 'stream',
      signal: this.abortController?.signal,
      timeout: 30000
    });

    await this.handleAnthropicStream(response, onStreamUpdate);
  }

  /**
   * Send a request to a custom remote endpoint. The Authorization header is
   * only attached when a token is configured; the stream is assumed to use
   * the OpenAI SSE format.
   */
  private async sendCustomMessage(
    userMessage: string,
    history: Message[],
    model: AIModelConfig,
    onStreamUpdate: StreamResponseHandler
  ): Promise<void> {
    const messages = this.buildMessageHistory(history, userMessage);

    const requestBody = {
      model: model.model_name,
      messages: messages,
      stream: true
    };

    const headers: Record<string, string> = {
      'Content-Type': 'application/json'
    };

    if (model.model_token) {
      headers['Authorization'] = `Bearer ${model.model_token}`;
    }

    const response = await axios.post(model.model_url, requestBody, {
      headers,
      responseType: 'stream',
      signal: this.abortController?.signal,
      timeout: 30000
    });

    await this.handleCustomStream(response, onStreamUpdate);
  }

  /**
   * Parse tool calls out of `content`, execute them, and append the
   * formatted results to the content.
   *
   * On execution failure the original content is returned unchanged (with
   * the parsed calls but no results), so a tool error never loses the
   * model's answer.
   */
  private async processToolCalls(content: string): Promise<{ enhancedContent: string; toolCalls: ToolCall[]; toolResults: ToolResult[] }> {
    const toolCalls = toolExecutor.parseToolCalls(content);

    if (toolCalls.length === 0) {
      return { enhancedContent: content, toolCalls: [], toolResults: [] };
    }

    try {
      // Execute the parsed tool calls (may have side effects).
      const toolResults = await toolExecutor.executeToolCalls(toolCalls);

      // Render the results as text and append them to the answer.
      const toolResultsText = toolExecutor.formatToolResults(toolResults);
      const enhancedContent = content + '\n\n' + toolResultsText;

      return { enhancedContent, toolCalls, toolResults };
    } catch (error) {
      console.error('工具调用处理失败:', error);
      return { enhancedContent: content, toolCalls, toolResults: [] };
    }
  }

  /**
   * Convert chat history to the wire format ({role, content} pairs) and
   * append the new user turn. Only messages that were successfully sent
   * (status === 'sent') are forwarded.
   */
  private buildMessageHistory(history: Message[], newMessage: string) {
    const messages = history
      .filter(msg => msg.status === 'sent')
      .map(msg => ({
        role: msg.role,
        content: msg.content
      }));

    messages.push({
      role: 'user' as const,
      content: newMessage
    });

    return messages;
  }

  /**
   * Consume an OpenAI-style SSE stream: `data: {json}` lines terminated by
   * `data: [DONE]`. Emits the cumulative content on every delta.
   */
  private async handleOpenAIStream(
    response: AxiosResponse,
    onStreamUpdate: StreamResponseHandler
  ): Promise<void> {
    let buffer = '';
    let fullContent = '';

    return new Promise((resolve, reject) => {
      response.data.on('data', (chunk: Buffer) => {
        buffer += chunk.toString();
        const lines = buffer.split('\n');
        // Keep the trailing partial line for the next chunk.
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (line.trim() === '') continue;
          if (line.trim() === 'data: [DONE]') {
            resolve();
            return;
          }

          if (line.startsWith('data: ')) {
            try {
              const data = JSON.parse(line.slice(6));
              const content = data.choices?.[0]?.delta?.content;
              if (content) {
                fullContent += content;
                onStreamUpdate(fullContent);
              }
            } catch (error) {
              // A malformed SSE line should not abort the whole stream.
              console.error('解析流数据失败:', error);
            }
          }
        }
      });

      response.data.on('end', () => {
        resolve();
      });

      response.data.on('error', (error: Error) => {
        reject(error);
      });
    });
  }

  /**
   * Consume an Anthropic streaming response: `content_block_delta` events
   * carry text deltas; `message_stop` ends the message.
   */
  private async handleAnthropicStream(
    response: AxiosResponse,
    onStreamUpdate: StreamResponseHandler
  ): Promise<void> {
    let buffer = '';
    let fullContent = '';

    return new Promise((resolve, reject) => {
      response.data.on('data', (chunk: Buffer) => {
        buffer += chunk.toString();
        const lines = buffer.split('\n');
        // Keep the trailing partial line for the next chunk.
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (line.trim() === '') continue;
          if (line.startsWith('data: ')) {
            try {
              const data = JSON.parse(line.slice(6));
              if (data.type === 'content_block_delta') {
                const content = data.delta?.text;
                if (content) {
                  fullContent += content;
                  onStreamUpdate(fullContent);
                }
              } else if (data.type === 'message_stop') {
                resolve();
                return;
              }
            } catch (error) {
              console.error('解析Anthropic流数据失败:', error);
            }
          }
        }
      });

      response.data.on('end', () => {
        resolve();
      });

      response.data.on('error', (error: Error) => {
        reject(error);
      });
    });
  }

  /**
   * Custom remote endpoints are assumed to speak the OpenAI SSE format.
   */
  private async handleCustomStream(
    response: AxiosResponse,
    onStreamUpdate: StreamResponseHandler
  ): Promise<void> {
    return this.handleOpenAIStream(response, onStreamUpdate);
  }

  /**
   * Send a request to an Ollama server, applying local-model options
   * (temperature, system prompt, keep_alive) from `model.local_config`.
   */
  private async sendOllamaMessage(
    userMessage: string,
    history: Message[],
    model: AIModelConfig,
    onStreamUpdate: StreamResponseHandler
  ): Promise<void> {
    const messages = this.buildMessageHistory(history, userMessage);

    const requestBody: any = {
      model: model.model_name,
      messages: messages,
      stream: true
    };

    // Apply local model configuration, if present.
    if (model.local_config) {
      if (model.local_config.temperature !== undefined) {
        requestBody.options = { temperature: model.local_config.temperature };
      }
      if (model.local_config.system_prompt) {
        // Ollama expects the system prompt as the first message.
        requestBody.messages.unshift({
          role: 'system',
          content: model.local_config.system_prompt
        });
      }
      if (model.local_config.keep_alive) {
        requestBody.keep_alive = model.local_config.keep_alive;
      }
    }

    const response = await axios.post(model.model_url, requestBody, {
      headers: {
        'Content-Type': 'application/json'
      },
      responseType: 'stream',
      signal: this.abortController?.signal,
      timeout: model.local_config?.timeout || 30000
    });

    await this.handleOllamaStream(response, onStreamUpdate);
  }

  /**
   * Send a request to a LocalAI server (OpenAI-compatible API), applying
   * local-model options from `model.local_config`.
   */
  private async sendLocalAIMessage(
    userMessage: string,
    history: Message[],
    model: AIModelConfig,
    onStreamUpdate: StreamResponseHandler
  ): Promise<void> {
    const messages = this.buildMessageHistory(history, userMessage);

    const requestBody: any = {
      model: model.model_name,
      messages: messages,
      stream: true
    };

    // Apply local model configuration, if present.
    if (model.local_config) {
      if (model.local_config.temperature !== undefined) {
        requestBody.temperature = model.local_config.temperature;
      }
      if (model.local_config.max_tokens !== undefined) {
        requestBody.max_tokens = model.local_config.max_tokens;
      }
      if (model.local_config.backend) {
        requestBody.backend = model.local_config.backend;
      }
    }

    const response = await axios.post(model.model_url, requestBody, {
      headers: {
        'Content-Type': 'application/json'
      },
      responseType: 'stream',
      signal: this.abortController?.signal,
      timeout: model.local_config?.timeout || 30000
    });

    // LocalAI uses the OpenAI-compatible stream format.
    await this.handleOpenAIStream(response, onStreamUpdate);
  }

  /**
   * Send a request to a custom local model endpoint, applying local-model
   * options and any custom headers. The response is handled by the
   * thinking-model stream parser.
   */
  private async sendCustomLocalMessage(
    userMessage: string,
    history: Message[],
    model: AIModelConfig,
    onStreamUpdate: StreamResponseHandler
  ): Promise<void> {
    const messages = this.buildMessageHistory(history, userMessage);

    const requestBody: any = {
      model: model.model_name,
      messages: messages,
      stream: true
    };

    // Apply local model configuration, if present.
    if (model.local_config) {
      if (model.local_config.temperature !== undefined) {
        requestBody.temperature = model.local_config.temperature;
      }
      if (model.local_config.max_tokens !== undefined) {
        requestBody.max_tokens = model.local_config.max_tokens;
      }
    }

    const headers: Record<string, string> = {
      'Content-Type': 'application/json'
    };

    // Custom headers are merged first so an explicit token still wins below.
    if (model.local_config?.custom_headers) {
      Object.assign(headers, model.local_config.custom_headers);
    }

    if (model.model_token) {
      headers['Authorization'] = `Bearer ${model.model_token}`;
    }

    const response = await axios.post(model.model_url, requestBody, {
      headers,
      responseType: 'stream',
      signal: this.abortController?.signal,
      timeout: model.local_config?.timeout || 30000
    });

    // Use the dedicated thinking-model stream handler.
    await this.handleThinkingModelStream(response, onStreamUpdate);
  }

  /**
   * Consume an Ollama NDJSON stream. Each line is a JSON object of the form
   * `{ "message": { "content": "text" }, "done": false }`.
   */
  private async handleOllamaStream(
    response: AxiosResponse,
    onStreamUpdate: StreamResponseHandler
  ): Promise<void> {
    let buffer = '';
    let fullContent = '';

    return new Promise((resolve, reject) => {
      response.data.on('data', (chunk: Buffer) => {
        buffer += chunk.toString();
        const lines = buffer.split('\n');
        // Keep the trailing partial line for the next chunk.
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (line.trim() === '') continue;

          try {
            const data = JSON.parse(line);

            if (data.message && data.message.content) {
              fullContent += data.message.content;
              onStreamUpdate(fullContent);
            }

            if (data.done) {
              resolve();
              return;
            }
          } catch (error) {
            console.error('解析Ollama流数据失败:', error);
          }
        }
      });

      response.data.on('end', () => {
        resolve();
      });

      response.data.on('error', (error: Error) => {
        reject(error);
      });
    });
  }

  /**
   * Stream handler for "thinking" models (OpenAI-style SSE) that interleave
   * reasoning with the final answer, either via <thinking>/<think> tags or
   * with no tags at all (classified heuristically).
   *
   * Tool calls are parsed and executed exactly once, when the stream
   * completes. (Previously they were re-executed on every incoming chunk
   * once candidate answer text appeared — repeating tool side effects — and
   * the final [DONE] pass re-processed the raw content including the
   * thinking tags it had already stripped. A stream that ended without a
   * [DONE] sentinel never ran tool processing at all.)
   */
  private async handleThinkingModelStream(
    response: AxiosResponse,
    onStreamUpdate: StreamResponseHandler
  ): Promise<void> {
    let buffer = '';
    let fullContent = '';
    let isThinkingPhase = true; // streams are assumed to open with reasoning
    let thinkingContent = '';
    let finalAnswer = '';

    // Heuristics for untagged reasoning, hoisted so the regexes are not
    // rebuilt on every chunk. NOTE(review): these patterns look tuned to one
    // specific model's Chinese reasoning style — confirm before reuse.
    const thinkingPatterns = [
      /^嘉[\uff0c。]/,  // starts with "嘉"
      /^先[\u8bb0]/,      // starts with "先记" (memorize first)
      /^接着/,         // starts with "then"
      /^可能/,         // starts with "maybe"
      /^要不要/,       // starts with "shall we"
      /^结构上/,       // starts with "structurally"
      /^现在组织/     // starts with "now organizing"
    ];

    return new Promise((resolve, reject) => {
      let settled = false;

      // Runs exactly once when the stream finishes: executes any tool calls
      // found in the final answer, emits the last update, and resolves.
      const finish = async (): Promise<void> => {
        if (settled) return;
        settled = true;
        try {
          // Prefer the extracted answer (thinking tags stripped); fall back
          // to the raw content when no answer phase was detected.
          const answer = finalAnswer || fullContent;
          if (toolExecutor.hasToolCalls(answer)) {
            const { enhancedContent, toolCalls, toolResults } =
              await this.processToolCalls(answer);
            onStreamUpdate(enhancedContent, false, thinkingContent, toolCalls, toolResults);
          }
          resolve();
        } catch (error) {
          // A tool failure should not fail the whole response.
          console.error('工具调用处理失败:', error);
          resolve();
        }
      };

      response.data.on('data', (chunk: Buffer) => {
        if (settled) return;
        buffer += chunk.toString();
        const lines = buffer.split('\n');
        // Keep the trailing partial line for the next chunk.
        buffer = lines.pop() || '';

        for (const line of lines) {
          if (line.trim() === '') continue;
          if (line.trim() === 'data: [DONE]') {
            void finish();
            return;
          }
          if (!line.startsWith('data: ')) continue;

          try {
            const data = JSON.parse(line.slice(6));
            const content = data.choices?.[0]?.delta?.content;
            if (!content) continue;
            fullContent += content;

            if (fullContent.includes('<thinking>')) {
              // Explicit <thinking> tags.
              isThinkingPhase = true;
              const thinkingMatch = fullContent.match(/<thinking>([\s\S]*?)(?:<\/thinking>|$)/);
              if (thinkingMatch) {
                thinkingContent = thinkingMatch[1];
              }

              if (fullContent.includes('</thinking>')) {
                // Reasoning finished; everything after the tag is the answer.
                isThinkingPhase = false;
                finalAnswer = (fullContent.split('</thinking>')[1] || '').trim();
                onStreamUpdate(finalAnswer, false, thinkingContent);
              } else {
                onStreamUpdate('', true, thinkingContent);
              }
            } else if (fullContent.includes('<think>')) {
              // Same protocol with the shorter <think> tag.
              isThinkingPhase = true;
              const thinkMatch = fullContent.match(/<think>([\s\S]*?)(?:<\/think>|$)/);
              if (thinkMatch) {
                thinkingContent = thinkMatch[1];
              }

              if (fullContent.includes('</think>')) {
                isThinkingPhase = false;
                finalAnswer = (fullContent.split('</think>')[1] || '').trim();
                onStreamUpdate(finalAnswer, false, thinkingContent);
              } else {
                onStreamUpdate('', true, thinkingContent);
              }
            } else {
              // No tags: classify by content heuristics.
              const looksLikeThinking = thinkingPatterns.some(pattern =>
                pattern.test(fullContent.trim())
              );

              if (looksLikeThinking && isThinkingPhase) {
                thinkingContent = fullContent;
                onStreamUpdate('', true, thinkingContent);
              } else if (isThinkingPhase && fullContent.length > 100) {
                // Long content still classified as thinking: check whether
                // the tail now looks like a formal answer and switch if so.
                const contentLines = fullContent.split('\n');
                const lastFewLines = contentLines.slice(-3).join('\n');
                const hasAnswerPattern = /^##|^以下是|^---/.test(lastFewLines);
                if (hasAnswerPattern) {
                  isThinkingPhase = false;
                  finalAnswer = fullContent;
                  onStreamUpdate(finalAnswer, false, thinkingContent);
                } else {
                  thinkingContent = fullContent;
                  onStreamUpdate('', true, thinkingContent);
                }
              } else {
                // Default: treat the stream as the formal answer.
                isThinkingPhase = false;
                finalAnswer = fullContent;
                onStreamUpdate(finalAnswer, false, thinkingContent);
              }
            }
          } catch (error) {
            console.error('解析流数据失败:', error);
          }
        }
      });

      response.data.on('end', () => {
        // Some servers close the stream without a [DONE] sentinel; finish()
        // is idempotent, so running it from both paths is safe.
        void finish();
      });

      response.data.on('error', (error: Error) => {
        if (!settled) {
          settled = true;
          reject(error);
        }
      });
    });
  }

  /**
   * Abort the current request, if any, and clear the controller.
   */
  cancelCurrentRequest(): void {
    if (this.abortController) {
      this.abortController.abort();
      this.abortController = null;
    }
  }

  /**
   * Release resources: cancel any in-flight request and deregister from the
   * interrupt controller.
   */
  cleanup(): void {
    this.cancelCurrentRequest();
    if (this.isRegistered) {
      interruptController.removeInterruptible(this);
      this.isRegistered = false;
    }
  }
}

export const aiService = new AIService();