/**
 * 真实AI服务实现
 * 对接OpenAI、Claude、本地大模型等真实AI服务
 */

// Supported AI service providers.
export enum AIProvider {
  OPENAI = 'openai',
  CLAUDE = 'claude',
  GEMINI = 'gemini',
  QWEN = 'qwen',          // Alibaba Tongyi Qianwen
  CHATGLM = 'chatglm',    // Zhipu ChatGLM
  BAICHUAN = 'baichuan',  // Baichuan
  LOCAL = 'local',        // locally deployed model
  OLLAMA = 'ollama'       // Ollama local deployment (supports multimodal models such as llava)
}

// Per-provider model configuration.
export interface AIModelConfig {
  provider: AIProvider
  model: string             // model identifier passed through to the provider's API
  apiKey?: string           // required by hosted providers; unused by local/ollama
  baseUrl?: string          // API root; defaults are set in RealAIService's constructor
  maxTokens?: number
  temperature?: number
  systemPrompt?: string     // prepended as a system message when present
  supportsImages?: boolean  // accepts image inputs
  supportsVision?: boolean  // supports visual analysis (checked by analyzeImage)
}

// Service-wide configuration.
export interface RealAIConfig {
  defaultProvider: AIProvider
  // NOTE(review): Record<AIProvider, …> requires an entry for EVERY enum member;
  // any object assigned here must cover all providers (or this should be widened
  // to Partial<Record<…>> with null checks at the call sites).
  models: Record<AIProvider, AIModelConfig>
  fallbackChain: AIProvider[]  // providers tried in order when the primary fails
  timeout: number              // per-request timeout in milliseconds
  retryAttempts: number        // NOTE(review): declared but not consumed in this file — confirm intent
  enableCache: boolean
  enableLogging: boolean
}

// Chat message roles; the string values match the OpenAI wire format,
// which is why they can be sent to OpenAI-compatible endpoints unchanged.
export enum MessageRole {
  SYSTEM = 'system',
  USER = 'user',
  ASSISTANT = 'assistant'
}

// A single chat message. `content` is either plain text or a multimodal
// part list (text and/or image parts, OpenAI vision-style).
export interface ChatMessage {
  role: MessageRole
  content: string | Array<{
    type: 'text' | 'image_url'
    text?: string
    image_url?: {
      url: string                      // data: URL (base64) or HTTP URL
      detail?: 'low' | 'high' | 'auto'
    }
  }>
  metadata?: {
    timestamp: Date
    provider?: AIProvider
    model?: string
    tokens?: number                    // total tokens reported by the provider
  }
}

// Normalized response returned by every provider adapter.
export interface AIResponse {
  id: string
  content: string
  provider: AIProvider
  model: string
  usage?: {
    promptTokens: number
    completionTokens: number
    totalTokens: number
  }
  finishReason: 'stop' | 'length' | 'content_filter' | 'error'
  metadata?: {
    processingTime: number  // wall-clock ms measured in chat()
    cached: boolean         // true when served from the in-memory cache
    fallback?: boolean      // true when a fallback provider produced the answer
  }
}

class RealAIService {
  /** Mutable service configuration; see updateConfig(). */
  private config: RealAIConfig
  /** Response cache keyed by provider + message hash (see generateCacheKey). */
  private cache: Map<string, { response: AIResponse; timestamp: number }> = new Map()
  /** Per-session chat transcripts, trimmed to MAX_HISTORY_LENGTH entries. */
  private conversationHistories: Map<string, ChatMessage[]> = new Map()

  /** Cached responses older than this (ms) are considered stale. */
  private static readonly CACHE_TTL_MS = 5 * 60 * 1000
  /** Upper bound on per-session conversation history length. */
  private static readonly MAX_HISTORY_LENGTH = 100

  constructor() {
    this.config = {
      defaultProvider: AIProvider.OLLAMA, // 优先使用Ollama多模态AI服务
      models: {
        [AIProvider.LOCAL]: {
          provider: AIProvider.LOCAL,
          model: 'qwen2.5-14b-instruct',
          // Browser builds talk to the developer's local service; SSR/server builds use the hosted endpoint.
          baseUrl: typeof window !== 'undefined' ? 'http://localhost:8000' : 'https://api.zhihunyun.xyz',
          maxTokens: 2000,
          temperature: 0.7,
          systemPrompt: '你是一个专业的医疗助手，专门为医院数字孪生系统提供服务。你需要提供准确、专业、友善的医疗咨询和导航服务。请用中文回答。'
        },
        [AIProvider.OPENAI]: {
          provider: AIProvider.OPENAI,
          model: 'gpt-4o-mini',
          apiKey: process.env.OPENAI_API_KEY || '',
          baseUrl: process.env.OPENAI_BASE_URL || 'https://api.openai.com/v1',
          maxTokens: 2000,
          temperature: 0.7,
          systemPrompt: '你是一个专业的医疗助手，专门为医院数字孪生系统提供服务。你需要提供准确、专业、友善的医疗咨询和导航服务。'
        },
        [AIProvider.CLAUDE]: {
          provider: AIProvider.CLAUDE,
          model: 'claude-3-haiku-20240307',
          apiKey: process.env.CLAUDE_API_KEY || '',
          baseUrl: 'https://api.anthropic.com',
          maxTokens: 2000,
          temperature: 0.7,
          systemPrompt: '你是一个专业的医疗助手，专门为医院数字孪生系统提供服务。'
        },
        [AIProvider.GEMINI]: {
          provider: AIProvider.GEMINI,
          model: 'gemini-1.5-flash',
          apiKey: process.env.GEMINI_API_KEY || '',
          baseUrl: 'https://generativelanguage.googleapis.com/v1beta',
          maxTokens: 2000,
          temperature: 0.7
        },
        [AIProvider.QWEN]: {
          provider: AIProvider.QWEN,
          model: 'qwen-turbo',
          apiKey: process.env.QWEN_API_KEY || '',
          baseUrl: 'https://dashscope.aliyuncs.com/api/v1',
          maxTokens: 2000,
          temperature: 0.7
        },
        // Placeholder entries: RealAIConfig declares models as a full
        // Record<AIProvider, AIModelConfig>, so every enum member needs a
        // config. sendToProvider has no branch for these two yet, so
        // selecting them throws the "不支持的AI提供商" error.
        [AIProvider.CHATGLM]: {
          provider: AIProvider.CHATGLM,
          model: 'glm-4-flash',
          apiKey: process.env.CHATGLM_API_KEY || '',
          baseUrl: 'https://open.bigmodel.cn/api/paas/v4',
          maxTokens: 2000,
          temperature: 0.7
        },
        [AIProvider.BAICHUAN]: {
          provider: AIProvider.BAICHUAN,
          model: 'Baichuan2-Turbo',
          apiKey: process.env.BAICHUAN_API_KEY || '',
          baseUrl: 'https://api.baichuan-ai.com/v1',
          maxTokens: 2000,
          temperature: 0.7
        },
        [AIProvider.OLLAMA]: {
          provider: AIProvider.OLLAMA,
          model: 'llava:13b',  // llava 13b multimodal model
          baseUrl: 'http://localhost:11434', // same host in browser and server (was a ternary with identical branches)
          maxTokens: 4000,
          temperature: 0.7,
          systemPrompt: '你是一个专业的医疗助手，能够分析医疗图像和文本。请提供准确、专业的医疗建议和分析结果。请用中文回答。',
          supportsImages: true,
          supportsVision: true
        }
      },
      fallbackChain: [AIProvider.OLLAMA, AIProvider.LOCAL], // 优先使用Ollama，再降级到其他本地服务
      timeout: 60000,   // applied to every outgoing request via fetchWithTimeout
      retryAttempts: 2, // NOTE(review): not consumed anywhere yet — confirm intended retry semantics
      enableCache: true,
      enableLogging: true
    }
  }

  /**
   * Send a chat request to an AI provider.
   *
   * Flow: cache lookup → primary provider → fallback chain → history update
   * → cache write (complete answers only) → timing metadata.
   *
   * @param messages conversation messages to send
   * @param options  per-call overrides (provider, session, sampling params)
   * @throws Error when the primary provider and every fallback fail
   */
  async chat(messages: ChatMessage[], options?: {
    provider?: AIProvider
    sessionId?: string
    temperature?: number
    maxTokens?: number
  }): Promise<AIResponse> {
    const startTime = Date.now()
    const provider = options?.provider ?? this.config.defaultProvider
    const sessionId = options?.sessionId ?? 'default'

    try {
      // Serve a recent identical request from cache.
      if (this.config.enableCache) {
        const cacheKey = this.generateCacheKey(messages, provider)
        const cached = this.cache.get(cacheKey)
        if (cached && Date.now() - cached.timestamp < RealAIService.CACHE_TTL_MS) {
          return { ...cached.response, metadata: { ...cached.response.metadata, cached: true } }
        }
      }

      // Try the requested provider; on failure walk the fallback chain.
      let response: AIResponse
      try {
        response = await this.sendToProvider(messages, provider, options)
      } catch (error) {
        console.warn(`Provider ${provider} failed, trying fallback:`, error)
        response = await this.tryFallbackProviders(messages, provider, options)
      }

      this.updateConversationHistory(sessionId, messages, response)

      // Only cache complete answers; truncated/filtered ones would be replayed incorrectly.
      if (this.config.enableCache && response.finishReason === 'stop') {
        this.pruneExpiredCacheEntries() // keep the unbounded Map from growing forever
        const cacheKey = this.generateCacheKey(messages, response.provider)
        this.cache.set(cacheKey, {
          response,
          timestamp: Date.now()
        })
      }

      // Attach timing; default `cached` so the metadata shape is always complete.
      response.metadata = {
        cached: false,
        ...response.metadata,
        processingTime: Date.now() - startTime
      }

      return response
    } catch (error) {
      console.error('AI服务请求失败:', error)
      throw new Error(`AI服务暂时不可用: ${error}`)
    }
  }

  /**
   * fetch() wrapper that enforces the configured request timeout.
   * Aborts the request after config.timeout ms so a hung provider
   * cannot stall the caller indefinitely.
   */
  private async fetchWithTimeout(url: string, init: RequestInit): Promise<Response> {
    const controller = new AbortController()
    const timer = setTimeout(() => controller.abort(), this.config.timeout)
    try {
      return await fetch(url, { ...init, signal: controller.signal })
    } finally {
      clearTimeout(timer)
    }
  }

  /**
   * Dispatch a request to the adapter for the given provider.
   * @throws Error for providers without an adapter (CHATGLM, BAICHUAN)
   */
  private async sendToProvider(
    messages: ChatMessage[],
    provider: AIProvider,
    options?: { temperature?: number; maxTokens?: number }
  ): Promise<AIResponse> {
    const modelConfig = this.config.models[provider]

    switch (provider) {
      case AIProvider.OPENAI:
        return await this.sendToOpenAI(messages, modelConfig, options)
      case AIProvider.CLAUDE:
        return await this.sendToClaude(messages, modelConfig, options)
      case AIProvider.GEMINI:
        return await this.sendToGemini(messages, modelConfig, options)
      case AIProvider.QWEN:
        return await this.sendToQwen(messages, modelConfig, options)
      case AIProvider.LOCAL:
        return await this.sendToLocal(messages, modelConfig, options)
      case AIProvider.OLLAMA:
        return await this.sendToOllama(messages, modelConfig, options)
      default:
        throw new Error(`不支持的AI提供商: ${provider}`)
    }
  }

  /**
   * OpenAI chat-completions adapter.
   * MessageRole values match the OpenAI wire format, so roles pass through unchanged.
   */
  private async sendToOpenAI(
    messages: ChatMessage[],
    config: AIModelConfig,
    options?: { temperature?: number; maxTokens?: number }
  ): Promise<AIResponse> {
    if (!config.apiKey) {
      throw new Error('OpenAI API密钥未配置')
    }

    const openaiMessages = messages.map(msg => ({
      role: msg.role,
      content: msg.content
    }))

    // Prepend the configured system prompt (use the enum member so the
    // element type stays MessageRole, not a bare string literal).
    if (config.systemPrompt) {
      openaiMessages.unshift({
        role: MessageRole.SYSTEM,
        content: config.systemPrompt
      })
    }

    const response = await this.fetchWithTimeout(`${config.baseUrl}/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${config.apiKey}`
      },
      body: JSON.stringify({
        model: config.model,
        messages: openaiMessages,
        // ?? (not ||) so an explicit temperature/maxTokens of 0 is honored.
        max_tokens: options?.maxTokens ?? config.maxTokens,
        temperature: options?.temperature ?? config.temperature,
        stream: false
      })
    })

    if (!response.ok) {
      const error = await response.json().catch(() => ({}))
      throw new Error(`OpenAI API错误: ${response.status} - ${error.error?.message || 'Unknown error'}`)
    }

    const data = await response.json()

    return {
      id: data.id,
      content: data.choices[0].message.content,
      provider: AIProvider.OPENAI,
      model: config.model!,
      usage: {
        promptTokens: data.usage?.prompt_tokens || 0,
        completionTokens: data.usage?.completion_tokens || 0,
        totalTokens: data.usage?.total_tokens || 0
      },
      // Preserve content_filter instead of collapsing everything non-stop to 'length'.
      finishReason: this.mapFinishReason(data.choices[0].finish_reason)
    }
  }

  /** Map an OpenAI-style finish_reason onto AIResponse['finishReason']. */
  private mapFinishReason(reason: string | undefined): AIResponse['finishReason'] {
    switch (reason) {
      case 'stop':
        return 'stop'
      case 'content_filter':
        return 'content_filter'
      default:
        return 'length'
    }
  }

  /**
   * Anthropic Claude Messages API adapter. Claude takes the system prompt as
   * a top-level `system` field, so system-role messages are filtered out.
   */
  private async sendToClaude(
    messages: ChatMessage[],
    config: AIModelConfig,
    options?: { temperature?: number; maxTokens?: number }
  ): Promise<AIResponse> {
    if (!config.apiKey) {
      throw new Error('Claude API密钥未配置')
    }

    const claudeMessages = messages.filter(m => m.role !== MessageRole.SYSTEM).map(msg => ({
      role: msg.role === MessageRole.USER ? 'user' : 'assistant',
      content: msg.content
    }))

    const response = await this.fetchWithTimeout(`${config.baseUrl}/messages`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'x-api-key': config.apiKey,
        'anthropic-version': '2023-06-01'
      },
      body: JSON.stringify({
        model: config.model,
        max_tokens: options?.maxTokens ?? config.maxTokens,
        temperature: options?.temperature ?? config.temperature,
        system: config.systemPrompt,
        messages: claudeMessages
      })
    })

    if (!response.ok) {
      const error = await response.json().catch(() => ({}))
      throw new Error(`Claude API错误: ${response.status} - ${error.error?.message || 'Unknown error'}`)
    }

    const data = await response.json()

    return {
      id: data.id,
      content: data.content[0].text,
      provider: AIProvider.CLAUDE,
      model: config.model!,
      usage: {
        promptTokens: data.usage?.input_tokens || 0,
        completionTokens: data.usage?.output_tokens || 0,
        totalTokens: (data.usage?.input_tokens || 0) + (data.usage?.output_tokens || 0)
      },
      finishReason: data.stop_reason === 'end_turn' ? 'stop' : 'length'
    }
  }

  /**
   * Google Gemini generateContent adapter.
   * NOTE(review): system-role messages are mapped to 'model' here — confirm
   * that is intended rather than using Gemini's systemInstruction field.
   */
  private async sendToGemini(
    messages: ChatMessage[],
    config: AIModelConfig,
    options?: { temperature?: number; maxTokens?: number }
  ): Promise<AIResponse> {
    if (!config.apiKey) {
      throw new Error('Gemini API密钥未配置')
    }

    // Flatten multimodal content to text; passing the raw array would
    // serialize as an object inside the `text` part.
    const geminiMessages = messages.map(msg => ({
      role: msg.role === MessageRole.USER ? 'user' : 'model',
      parts: [{
        text: typeof msg.content === 'string' ? msg.content : this.formatMultimodalContent(msg.content)
      }]
    }))

    const response = await this.fetchWithTimeout(`${config.baseUrl}/models/${config.model}:generateContent?key=${config.apiKey}`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify({
        contents: geminiMessages,
        generationConfig: {
          temperature: options?.temperature ?? config.temperature,
          maxOutputTokens: options?.maxTokens ?? config.maxTokens
        }
      })
    })

    if (!response.ok) {
      const error = await response.json().catch(() => ({}))
      throw new Error(`Gemini API错误: ${response.status} - ${error.error?.message || 'Unknown error'}`)
    }

    const data = await response.json()
    const candidate = data.candidates?.[0]

    if (!candidate || !candidate.content) {
      throw new Error('Gemini API返回无效响应')
    }

    return {
      id: `gemini_${Date.now()}`,
      content: candidate.content.parts[0].text,
      provider: AIProvider.GEMINI,
      model: config.model!,
      usage: {
        promptTokens: data.usageMetadata?.promptTokenCount || 0,
        completionTokens: data.usageMetadata?.candidatesTokenCount || 0,
        totalTokens: data.usageMetadata?.totalTokenCount || 0
      },
      finishReason: candidate.finishReason === 'STOP' ? 'stop' : 'length'
    }
  }

  /**
   * Alibaba DashScope (Tongyi Qianwen) text-generation adapter.
   */
  private async sendToQwen(
    messages: ChatMessage[],
    config: AIModelConfig,
    options?: { temperature?: number; maxTokens?: number }
  ): Promise<AIResponse> {
    if (!config.apiKey) {
      throw new Error('通义千问API密钥未配置')
    }

    // The text-generation endpoint expects string content; flatten multimodal parts.
    const qwenMessages = messages.map(msg => ({
      role: msg.role,
      content: typeof msg.content === 'string' ? msg.content : this.formatMultimodalContent(msg.content)
    }))

    const response = await this.fetchWithTimeout(`${config.baseUrl}/services/aigc/text-generation/generation`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${config.apiKey}`,
        'X-DashScope-SSE': 'disable'
      },
      body: JSON.stringify({
        model: config.model,
        input: {
          messages: qwenMessages
        },
        parameters: {
          max_tokens: options?.maxTokens ?? config.maxTokens,
          temperature: options?.temperature ?? config.temperature
        }
      })
    })

    if (!response.ok) {
      const error = await response.json().catch(() => ({}))
      throw new Error(`通义千问API错误: ${response.status} - ${error.message || 'Unknown error'}`)
    }

    const data = await response.json()

    return {
      id: data.request_id || `qwen_${Date.now()}`,
      content: data.output.text,
      provider: AIProvider.QWEN,
      model: config.model!,
      usage: {
        promptTokens: data.usage?.input_tokens || 0,
        completionTokens: data.usage?.output_tokens || 0,
        totalTokens: data.usage?.total_tokens || 0
      },
      finishReason: data.output.finish_reason === 'stop' ? 'stop' : 'length'
    }
  }

  /**
   * Local model adapter. The concrete server API is unknown, so several
   * common endpoint/payload shapes are tried in order; the first endpoint
   * that answers with a recognizable body wins.
   */
  private async sendToLocal(
    messages: ChatMessage[],
    config: AIModelConfig,
    options?: { temperature?: number; maxTokens?: number }
  ): Promise<AIResponse> {

    // Build the outgoing message list, with the system prompt first.
    const fullMessages: Array<{ role: MessageRole; content: ChatMessage['content'] }> = []
    if (config.systemPrompt) {
      fullMessages.push({
        role: MessageRole.SYSTEM,
        content: config.systemPrompt
      })
    }
    fullMessages.push(...messages.map(msg => ({
      role: msg.role,
      content: msg.content
    })))

    // Candidate endpoints, most standard first.
    const endpoints = [
      // 1: OpenAI-compatible FastAPI endpoint
      {
        url: `${config.baseUrl}/v1/chat/completions`,
        payload: {
          model: config.model,
          messages: fullMessages,
          max_tokens: options?.maxTokens ?? config.maxTokens,
          temperature: options?.temperature ?? config.temperature,
          stream: false
        }
      },
      // 2: Ollama-style /api/chat endpoint
      {
        url: `${config.baseUrl}/api/chat`,
        payload: {
          model: config.model,
          messages: fullMessages,
          stream: false,
          options: {
            num_predict: options?.maxTokens ?? config.maxTokens,
            temperature: options?.temperature ?? config.temperature
          }
        }
      },
      // 3: bare /chat endpoint
      {
        url: `${config.baseUrl}/chat`,
        payload: {
          model: config.model,
          messages: fullMessages,
          max_tokens: options?.maxTokens ?? config.maxTokens,
          temperature: options?.temperature ?? config.temperature
        }
      }
    ]

    let lastError: unknown = null

    for (const endpoint of endpoints) {
      try {
        console.log(`尝试调用本地AI: ${endpoint.url}`)

        const response = await this.fetchWithTimeout(endpoint.url, {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json'
          },
          body: JSON.stringify(endpoint.payload)
        })

        if (!response.ok) {
          const errorText = await response.text()
          console.warn(`API调用失败 ${endpoint.url}: ${response.status} - ${errorText}`)
          lastError = new Error(`HTTP ${response.status}: ${errorText}`)
          continue
        }

        const data = await response.json()
        console.log('本地AI响应:', data)

        // Recognize the response body by shape.
        let content = ''
        if (data.choices && data.choices[0]?.message?.content) {
          content = data.choices[0].message.content        // OpenAI-compatible
        } else if (data.message && data.message.content) {
          content = data.message.content                   // Ollama-style
        } else if (data.response) {
          content = data.response                          // { response: "..." }
        } else if (typeof data === 'string') {
          content = data                                   // bare string
        } else if (data.content) {
          content = data.content                           // { content: "..." }
        } else {
          console.warn('未知的响应格式:', data)
          continue
        }

        return {
          id: data.id || `local_${Date.now()}`,
          content,
          provider: AIProvider.LOCAL,
          model: config.model!,
          usage: {
            promptTokens: data.usage?.prompt_tokens || 0,
            completionTokens: data.usage?.completion_tokens || 0,
            totalTokens: data.usage?.total_tokens || 0
          },
          finishReason: (data.choices?.[0]?.finish_reason === 'stop' || data.done) ? 'stop' : 'length'
        }

      } catch (error) {
        console.warn(`接口调用异常 ${endpoint.url}:`, error)
        lastError = error
        continue
      }
    }

    // Every endpoint failed — surface the last error's message.
    const reason = lastError instanceof Error && lastError.message
      ? lastError.message
      : '所有接口都不可用'
    throw new Error(`本地AI服务连接失败: ${reason}`)
  }

  /**
   * Ollama /api/chat adapter (non-streaming).
   * NOTE(review): for llava-style vision models, Ollama expects base64 images
   * in a per-message `images` array; formatMultimodalContent currently inlines
   * image data into the text instead — confirm the server handles this.
   */
  private async sendToOllama(
    messages: ChatMessage[],
    config: AIModelConfig,
    options?: { temperature?: number; maxTokens?: number }
  ): Promise<AIResponse> {
    // Flatten multimodal content to plain strings.
    const ollamaMessages = messages.map(msg => {
      if (Array.isArray(msg.content)) {
        return {
          role: msg.role,
          content: this.formatMultimodalContent(msg.content)
        }
      }
      return {
        role: msg.role,
        content: msg.content as string
      }
    })

    // Prepend the system prompt using the enum member (keeps the role type MessageRole).
    if (config.systemPrompt) {
      ollamaMessages.unshift({
        role: MessageRole.SYSTEM,
        content: config.systemPrompt
      })
    }

    const requestBody = {
      model: config.model,
      messages: ollamaMessages,
      stream: false,
      options: {
        temperature: options?.temperature ?? config.temperature,
        num_predict: options?.maxTokens ?? config.maxTokens,
        top_k: 40,
        top_p: 0.9
      }
    }

    console.log('🦙 Ollama请求:', {
      model: config.model,
      messages: ollamaMessages.length,
      baseUrl: config.baseUrl
    })

    const response = await this.fetchWithTimeout(`${config.baseUrl}/api/chat`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json'
      },
      body: JSON.stringify(requestBody)
    })

    if (!response.ok) {
      let errorMessage = `Ollama API错误: ${response.status}`
      try {
        const error = await response.json()
        errorMessage += ` - ${error.error || 'Unknown error'}`
      } catch (e) {
        errorMessage += ` - ${response.statusText}`
      }
      throw new Error(errorMessage)
    }

    const data = await response.json()

    if (!data.message || !data.message.content) {
      throw new Error('Ollama API返回无效响应')
    }

    return {
      id: `ollama_${Date.now()}_${Math.random().toString(36).substring(2, 15)}`,
      content: data.message.content,
      provider: AIProvider.OLLAMA,
      model: config.model!,
      usage: {
        promptTokens: data.prompt_eval_count || 0,
        completionTokens: data.eval_count || 0,
        totalTokens: (data.prompt_eval_count || 0) + (data.eval_count || 0)
      },
      finishReason: data.done ? 'stop' : 'length'
    }
  }

  /**
   * Flatten a multimodal part list into a single text string.
   * Image parts are inlined as tagged text; data: URLs pass through as-is,
   * HTTP URLs are tagged for downstream handling.
   */
  private formatMultimodalContent(content: Array<{
    type: 'text' | 'image_url'
    text?: string
    image_url?: { url: string; detail?: string }
  }>): string {
    let formattedContent = ''

    for (const item of content) {
      if (item.type === 'text' && item.text) {
        formattedContent += item.text
      } else if (item.type === 'image_url' && item.image_url?.url) {
        if (item.image_url.url.startsWith('data:')) {
          formattedContent += `\n\n[图像]: ${item.image_url.url}`
        } else {
          formattedContent += `\n\n[图像URL]: ${item.image_url.url}`
        }
      }
    }

    return formattedContent
  }

  /**
   * Walk the configured fallback chain (excluding the provider that just
   * failed) and return the first successful response, tagged fallback:true.
   * @throws Error when every fallback provider fails as well
   */
  private async tryFallbackProviders(
    messages: ChatMessage[],
    failedProvider: AIProvider,
    options?: { temperature?: number; maxTokens?: number }
  ): Promise<AIResponse> {
    const fallbackChain = this.config.fallbackChain.filter(p => p !== failedProvider)

    for (const provider of fallbackChain) {
      try {
        const response = await this.sendToProvider(messages, provider, options)
        response.metadata = { ...response.metadata, fallback: true }
        return response
      } catch (error) {
        console.warn(`Fallback provider ${provider} also failed:`, error)
        continue
      }
    }

    throw new Error('所有AI提供商都不可用')
  }

  /**
   * Build a deterministic cache key from provider + messages.
   * Uses a FNV-1a hash because btoa() throws on non-Latin-1 input
   * (e.g. Chinese text), and serializes array content explicitly so
   * multimodal messages don't collapse to "[object Object]".
   */
  private generateCacheKey(messages: ChatMessage[], provider: AIProvider): string {
    const messageString = messages
      .map(m => `${m.role}:${typeof m.content === 'string' ? m.content : JSON.stringify(m.content)}`)
      .join('|')
    // 32-bit FNV-1a
    let hash = 0x811c9dc5
    for (let i = 0; i < messageString.length; i++) {
      hash ^= messageString.charCodeAt(i)
      hash = Math.imul(hash, 0x01000193)
    }
    // Length suffix cheaply reduces collision risk between different inputs.
    return `${provider}:${(hash >>> 0).toString(16)}:${messageString.length}`
  }

  /** Drop cache entries past their TTL so the Map stays bounded. */
  private pruneExpiredCacheEntries(): void {
    const now = Date.now()
    for (const [key, entry] of this.cache) {
      if (now - entry.timestamp >= RealAIService.CACHE_TTL_MS) {
        this.cache.delete(key)
      }
    }
  }

  /**
   * Append the request messages and the AI response to the session's
   * history, stamping timestamps, then trim to MAX_HISTORY_LENGTH.
   */
  private updateConversationHistory(sessionId: string, messages: ChatMessage[], response: AIResponse): void {
    if (!this.conversationHistories.has(sessionId)) {
      this.conversationHistories.set(sessionId, [])
    }

    const history = this.conversationHistories.get(sessionId)!

    messages.forEach(msg => {
      history.push({
        ...msg,
        metadata: {
          ...msg.metadata,
          timestamp: new Date()
        }
      })
    })

    history.push({
      role: MessageRole.ASSISTANT,
      content: response.content,
      metadata: {
        timestamp: new Date(),
        provider: response.provider,
        model: response.model,
        tokens: response.usage?.totalTokens
      }
    })

    // Keep only the most recent entries.
    if (history.length > RealAIService.MAX_HISTORY_LENGTH) {
      history.splice(0, history.length - RealAIService.MAX_HISTORY_LENGTH)
    }
  }

  /** Return the stored history for a session (empty array if none). */
  getConversationHistory(sessionId: string): ChatMessage[] {
    return this.conversationHistories.get(sessionId) || []
  }

  /** Discard the stored history for a session. */
  clearConversationHistory(sessionId: string): void {
    this.conversationHistories.delete(sessionId)
  }

  /** Shallow-merge partial configuration over the current config. */
  updateConfig(newConfig: Partial<RealAIConfig>): void {
    this.config = { ...this.config, ...newConfig }
  }

  /**
   * Providers that are usable right now: local providers (LOCAL, OLLAMA)
   * need no API key; hosted providers need one configured.
   */
  getAvailableProviders(): AIProvider[] {
    const keylessProviders = new Set<AIProvider>([AIProvider.LOCAL, AIProvider.OLLAMA])
    return (Object.keys(this.config.models) as AIProvider[]).filter(provider => {
      const modelConfig = this.config.models[provider]
      return Boolean(modelConfig.apiKey) || keylessProviders.has(provider)
    })
  }

  /**
   * Probe a provider with a trivial message; returns true if it answers.
   * Network errors are swallowed (logged) and reported as false.
   */
  async testProvider(provider: AIProvider): Promise<boolean> {
    try {
      const testMessages: ChatMessage[] = [{
        role: MessageRole.USER,
        content: '你好'
      }]

      await this.sendToProvider(testMessages, provider)
      return true
    } catch (error) {
      console.warn(`Provider ${provider} test failed:`, error)
      return false
    }
  }

  /**
   * Analyze an image with a vision-capable model (defaults to Ollama/llava).
   * @param imageData base64 data URL or HTTP URL of the image
   * @param prompt    analysis instruction; defaults to a medical prompt
   * @throws Error when the selected provider lacks vision support
   */
  async analyzeImage(imageData: string, prompt?: string, options?: {
    provider?: AIProvider
    sessionId?: string
    temperature?: number
    maxTokens?: number
  }): Promise<AIResponse> {
    const provider = options?.provider ?? AIProvider.OLLAMA
    const config = this.config.models[provider]

    if (!config.supportsVision) {
      throw new Error(`提供商 ${provider} 不支持图像分析功能`)
    }

    const analysisPrompt = prompt || '请分析这张图片，特别关注医疗相关的内容。描述你看到了什么，并提供专业的医疗建议或解读。'

    const messages: ChatMessage[] = [{
      role: MessageRole.USER,
      content: [
        {
          type: 'text',
          text: analysisPrompt
        },
        {
          type: 'image_url',
          image_url: {
            url: imageData, // base64 data URL or HTTP URL
            detail: 'high'
          }
        }
      ]
    }]

    console.log('🖼️ 图像分析请求:', {
      provider,
      model: config.model,
      promptLength: analysisPrompt.length,
      imageSize: imageData.length
    })

    try {
      const response = await this.chat(messages, {
        provider,
        sessionId: options?.sessionId,
        temperature: options?.temperature,
        maxTokens: options?.maxTokens ?? 4000 // image analysis needs a larger budget
      })

      console.log('✅ 图像分析完成:', {
        provider: response.provider,
        model: response.model,
        contentLength: response.content.length
      })

      return response
    } catch (error) {
      console.error('❌ 图像分析失败:', error)
      throw error
    }
  }

  /**
   * Medical-image analysis with a modality-specific prompt.
   * Uses a low temperature for more deterministic output.
   */
  async analyzeMedicalImage(imageData: string, analysisType: 'xray' | 'ct' | 'mri' | 'ultrasound' | 'general' = 'general', options?: {
    sessionId?: string
    includeRecommendations?: boolean
  }): Promise<AIResponse> {
    const medicalPrompts = {
      xray: '这是一张X光片，请作为专业的放射科医生分析这张图片。描述你观察到的解剖结构、异常发现，并提供可能的诊断建议。注意：这仅供参考，不能替代专业医疗诊断。',
      ct: '这是一张CT扫描图，请作为专业的放射科医生分析这张图片。描述你观察到的解剖结构、密度变化、异常发现，并提供可能的诊断建议。注意：这仅供参考，不能替代专业医疗诊断。',
      mri: '这是一张MRI图像，请作为专业的放射科医生分析这张图片。描述你观察到的解剖结构、信号特征、异常发现，并提供可能的诊断建议。注意：这仅供参考，不能替代专业医疗诊断。',
      ultrasound: '这是一张超声图像，请作为专业的超声科医生分析这张图片。描述你观察到的解剖结构、回声特征、异常发现，并提供可能的诊断建议。注意：这仅供参考，不能替代专业医疗诊断。',
      general: '这是一张医疗相关图像，请作为专业的医生分析这张图片。描述你观察到的内容，并提供专业的医疗解读和建议。注意：这仅供参考，不能替代专业医疗诊断。'
    }

    let prompt = medicalPrompts[analysisType]

    if (options?.includeRecommendations) {
      prompt += '\n\n请额外提供：\n1. 建议的进一步检查\n2. 需要关注的要点\n3. 患者教育要点\n4. 紧急性评估'
    }

    return this.analyzeImage(imageData, prompt, {
      provider: AIProvider.OLLAMA,
      sessionId: options?.sessionId,
      temperature: 0.3, // low temperature for more reliable medical output
      maxTokens: 6000
    })
  }
}

// Shared singleton instance — the whole app talks to one service object
// so the cache and conversation histories are shared.
export const realAIService = new RealAIService()
export default realAIService