const { randomUUID } = require('crypto')
const axios = require('axios')
const AiConfig = require('../models/AiConfig')
const ChatMessage = require('../models/ChatMessage')
const config = require('../config')
const logger = require('../utils/logger')
const {
  AI_MODELS,
  AI_PROVIDERS,
  AI_PROVIDER_CONFIGS,
  MESSAGE_ROLES,
  ERROR_CODES,
  SYSTEM_MESSAGES
} = require('../utils/constants')
const { AppError } = require('../middleware/errorHandler')

class AiService {
  /**
   * Send a user message to an AI model and persist both the user message
   * and the assistant reply for the session.
   *
   * @param {string} userId - Owner of the session / AI config
   * @param {string} sessionId - Chat session the exchange belongs to
   * @param {Object} messageData - `{ content, aiConfigId, systemPrompt }`
   * @returns {Promise<Object>} `{ userMessage, assistantMessage, tokenUsage, responseTime }`
   * @throws {AppError} When the config is missing/forbidden or the provider call fails
   */
  static async sendMessage(userId, sessionId, messageData) {
    try {
      const { content, aiConfigId, systemPrompt } = messageData

      // Resolve the AI configuration (explicit id, or the user's default)
      const aiConfig = await this.getAiConfig(userId, aiConfigId)

      // Rebuild the conversation context from persisted history
      const messages = await this.buildMessageHistory(sessionId, systemPrompt)

      // Append the new user message to the outgoing context
      messages.push({
        role: MESSAGE_ROLES.USER,
        content
      })

      // Call the provider and measure wall-clock latency
      const startTime = Date.now()
      const response = await this.callAiApi(aiConfig, messages)
      const responseTime = Date.now() - startTime

      // The user message is persisted only after a successful provider
      // call, so a failed call leaves no orphaned message in the session
      const userMessage = await ChatMessage.create({
        id: randomUUID(),
        sessionId,
        userId,
        role: MESSAGE_ROLES.USER,
        content,
        aiConfigId,
        metadata: {
          aiConfigId,
          timestamp: new Date().toISOString()
        }
      })

      // Persist the assistant reply with usage/latency metadata
      const assistantMessage = await ChatMessage.create({
        id: randomUUID(),
        sessionId,
        userId,
        role: MESSAGE_ROLES.ASSISTANT,
        content: response.content,
        aiConfigId,
        metadata: {
          aiConfigId,
          model: aiConfig.model,
          provider: aiConfig.provider,
          tokenUsage: response.tokenUsage,
          responseTime,
          timestamp: new Date().toISOString()
        }
      })

      logger.info('Messages saved to database successfully', {
        userId,
        sessionId,
        aiConfigId,
        messageLength: content.length,
        responseLength: response.content.length
      })

      return {
        userMessage,
        assistantMessage,
        tokenUsage: response.tokenUsage,
        responseTime
      }
    } catch (error) {
      // This catch covers config lookup, history building, the provider
      // call and persistence — not just the user-message save, so the
      // log message reflects the whole pipeline
      logger.error('Failed to process chat message', {
        error: error.message,
        userId,
        sessionId,
        messageData
      })
      throw error
    }
  }

  /**
   * Resolve the AI configuration to use for a request.
   *
   * When `aiConfigId` is given, the config must exist and either belong to
   * the user or be a shared default. Otherwise the user's active default
   * config is used. Disabled configs are rejected in both paths.
   *
   * @param {string} userId - Requesting user
   * @param {string} [aiConfigId] - Explicit config id, optional
   * @returns {Promise<Object>} The resolved AiConfig instance
   * @throws {AppError} CONFIG_NOT_FOUND / ACCESS_DENIED
   */
  static async getAiConfig(userId, aiConfigId) {
    let aiConfig

    if (aiConfigId) {
      aiConfig = await AiConfig.findByPk(aiConfigId)
      if (!aiConfig) {
        throw new AppError(ERROR_CODES.CONFIG_NOT_FOUND, 'AI配置不存在')
      }

      // Ownership check: shared defaults are readable by everyone
      if (aiConfig.userId !== userId && !aiConfig.isDefault) {
        throw new AppError(ERROR_CODES.ACCESS_DENIED, '无权访问此AI配置')
      }
    } else {
      // Fall back to the user's active default configuration
      aiConfig = await AiConfig.findOne({ where: { userId, isDefault: true, isActive: true } })
      if (!aiConfig) {
        throw new AppError(ERROR_CODES.CONFIG_NOT_FOUND, '未找到可用的AI配置')
      }
    }

    // Explicitly-selected configs may be disabled; reject those too
    if (!aiConfig.isActive) {
      throw new AppError(ERROR_CODES.CONFIG_NOT_FOUND, 'AI配置已禁用')
    }

    return aiConfig
  }

  /**
   * Build the message context for a provider call: one system message
   * followed by the most recent session history (oldest first).
   *
   * @param {string} sessionId - Session whose history is loaded
   * @param {string} [systemPrompt] - Overrides the default system prompt
   * @returns {Promise<Array<{role: string, content: string}>>}
   */
  static async buildMessageHistory(sessionId, systemPrompt) {
    // Seed the context with the caller's system prompt or the default one
    const messages = [{
      role: MESSAGE_ROLES.SYSTEM,
      content: systemPrompt || SYSTEM_MESSAGES.DEFAULT
    }]

    // Load the 20 most recent non-deleted messages (newest first);
    // only the rows are needed, so findAll is sufficient
    const chatMessages = await ChatMessage.findAll({
      where: {
        sessionId,
        isDeleted: false
      },
      limit: 20,
      order: [['createdAt', 'DESC']]
    })

    // Reverse to chronological order; skip persisted system messages so
    // the system prompt chosen above is the only one in the context
    chatMessages.reverse()
    for (const msg of chatMessages) {
      if (msg.role !== MESSAGE_ROLES.SYSTEM) {
        messages.push({
          role: msg.role,
          content: msg.content
        })
      }
    }

    return messages
  }

  /**
   * Dispatch a chat completion request to the configured provider and
   * normalize transport/API errors into AppErrors.
   *
   * @param {Object} aiConfig - Resolved AI configuration
   * @param {Array<Object>} messages - Chat context (system + history + user)
   * @returns {Promise<{content: string, tokenUsage: Object}>}
   * @throws {AppError} Mapped from HTTP status / connection failures
   */
  static async callAiApi(aiConfig, messages) {
    try {
      // API key is used as stored (not yet encrypted in the database)
      const apiKey = aiConfig.apiKey

      switch (aiConfig.provider) {
        case AI_PROVIDERS.SILICONFLOW:
          return await this.callSiliconFlowApi(aiConfig, messages, apiKey)
        case AI_PROVIDERS.OPENAI:
          return await this.callOpenAiApi(aiConfig, messages, apiKey)
        case AI_PROVIDERS.DEEPSEEK:
          return await this.callDeepSeekApi(aiConfig, messages, apiKey)
        default:
          throw new AppError(ERROR_CODES.AI_SERVICE_ERROR, `不支持的AI提供商: ${aiConfig.provider}`)
      }
    } catch (error) {
      if (error.response) {
        // The provider answered with an HTTP error status
        const status = error.response.status
        const data = error.response.data

        logger.error('AI API error', {
          status,
          data,
          provider: aiConfig.provider,
          model: aiConfig.model
        })

        if (status === 401) {
          throw new AppError(ERROR_CODES.AI_SERVICE_ERROR, 'AI服务认证失败，请检查API密钥')
        } else if (status === 429) {
          throw new AppError(ERROR_CODES.RATE_LIMIT_EXCEEDED, 'AI服务调用频率超限')
        } else if (status >= 500) {
          throw new AppError(ERROR_CODES.SERVICE_UNAVAILABLE, 'AI服务暂时不可用')
        } else {
          // data may be a plain string or missing entirely — guard the chain
          throw new AppError(ERROR_CODES.AI_SERVICE_ERROR, data?.error?.message || 'AI服务调用失败')
        }
      } else if (error.code === 'ECONNREFUSED' || error.code === 'ETIMEDOUT') {
        throw new AppError(ERROR_CODES.SERVICE_UNAVAILABLE, 'AI服务连接失败')
      } else {
        // AppErrors from the switch (and anything unexpected) pass through
        throw error
      }
    }
  }

  /**
   * Return the first candidate that converts to a finite number.
   * Unlike `||`-chains this keeps legitimate `0` values (e.g. temperature 0).
   *
   * @param {...*} candidates - Values in priority order; the last one
   *   should be a numeric fallback so a result is always produced
   * @returns {number|undefined}
   */
  static resolveNumber(...candidates) {
    for (const value of candidates) {
      if (value == null) continue
      const n = Number(value)
      if (Number.isFinite(n)) return n
    }
    return undefined
  }

  /**
   * POST a chat completion to an OpenAI-compatible endpoint and normalize
   * the response. Shared by all provider-specific callers.
   *
   * @param {string} url - Full chat/completions endpoint URL
   * @param {Object} aiConfig - Provides model + tuning parameters
   * @param {Array<Object>} messages - Chat context
   * @param {string} apiKey - Bearer token for the provider
   * @param {Object} [extraBody] - Provider-specific body fields (e.g. stream)
   * @returns {Promise<{content: string, tokenUsage: Object}>}
   */
  static async postChatCompletion(url, aiConfig, messages, apiKey, extraBody = {}) {
    // `parameters` may be absent on older configs — fall back to the
    // flat columns, then to sane defaults
    const params = aiConfig.parameters ?? {}
    const response = await axios.post(
      url,
      {
        model: aiConfig.model,
        messages,
        max_tokens: this.resolveNumber(params.maxTokens, aiConfig.maxTokens, 2000),
        temperature: this.resolveNumber(params.temperature, aiConfig.temperature, 0.7),
        top_p: this.resolveNumber(params.topP, aiConfig.topP, 0.9),
        ...extraBody
      },
      {
        headers: {
          'Authorization': `Bearer ${apiKey}`,
          'Content-Type': 'application/json'
        },
        timeout: 30000
      }
    )

    const choice = response.data.choices[0]
    const usage = response.data.usage

    return {
      content: choice.message.content,
      tokenUsage: {
        promptTokens: usage.prompt_tokens,
        completionTokens: usage.completion_tokens,
        totalTokens: usage.total_tokens
      }
    }
  }

  /**
   * Call the SiliconFlow chat completion API.
   *
   * @param {Object} aiConfig
   * @param {Array<Object>} messages
   * @param {string} apiKey
   * @returns {Promise<{content: string, tokenUsage: Object}>}
   */
  static async callSiliconFlowApi(aiConfig, messages, apiKey) {
    const siliconflowConfig = AI_PROVIDER_CONFIGS[AI_PROVIDERS.SILICONFLOW]
    return this.postChatCompletion(
      aiConfig.apiUrl || siliconflowConfig.baseUrl + '/chat/completions',
      aiConfig,
      messages,
      apiKey,
      { stream: false }
    )
  }

  /**
   * Call the OpenAI chat completion API.
   *
   * @param {Object} aiConfig
   * @param {Array<Object>} messages
   * @param {string} apiKey
   * @returns {Promise<{content: string, tokenUsage: Object}>}
   */
  static async callOpenAiApi(aiConfig, messages, apiKey) {
    return this.postChatCompletion(
      aiConfig.apiUrl || 'https://api.openai.com/v1/chat/completions',
      aiConfig,
      messages,
      apiKey
    )
  }

  /**
   * Call the DeepSeek chat completion API.
   *
   * @param {Object} aiConfig
   * @param {Array<Object>} messages
   * @param {string} apiKey
   * @returns {Promise<{content: string, tokenUsage: Object}>}
   */
  static async callDeepSeekApi(aiConfig, messages, apiKey) {
    return this.postChatCompletion(
      aiConfig.apiUrl || 'https://api.deepseek.com/v1/chat/completions',
      aiConfig,
      messages,
      apiKey
    )
  }

  /**
   * Smoke-test an AI configuration with a tiny round-trip request.
   * Never throws: failures are reported in the result object.
   *
   * @param {Object} aiConfig - Configuration to test
   * @returns {Promise<{success: boolean, responseTime?: number, tokenUsage?: Object, response?: string, error?: string}>}
   */
  static async testConnection(aiConfig) {
    try {
      const testMessages = [
        {
          role: MESSAGE_ROLES.SYSTEM,
          content: '你是一个AI助手。'
        },
        {
          role: MESSAGE_ROLES.USER,
          content: '请回复"连接测试成功"'
        }
      ]

      const startTime = Date.now()
      const response = await this.callAiApi(aiConfig, testMessages)
      const responseTime = Date.now() - startTime

      return {
        success: true,
        responseTime,
        tokenUsage: response.tokenUsage,
        response: response.content
      }
    } catch (error) {
      return {
        success: false,
        error: error.message
      }
    }
  }

  /**
   * List the models supported by a provider.
   *
   * @param {string} provider - One of AI_PROVIDERS
   * @returns {Array} Model descriptors, or [] for unknown providers
   */
  static getSupportedModels(provider) {
    const providerConfig = AI_PROVIDER_CONFIGS[provider]
    return providerConfig ? providerConfig.models : []
  }

  /**
   * List all supported provider identifiers.
   *
   * @returns {Array<string>}
   */
  static getSupportedProviders() {
    return Object.values(AI_PROVIDERS)
  }

  /**
   * Validate model tuning parameters. Only parameters that are present
   * (non-null/undefined) are checked — note this also validates `0`,
   * which the previous truthiness checks silently skipped.
   *
   * @param {Object} parameters - { maxTokens, temperature, topP, frequencyPenalty, presencePenalty }
   * @returns {Array<string>} Validation error messages; empty when valid
   */
  static validateParameters(parameters) {
    const errors = []
    const { maxTokens, temperature, topP, frequencyPenalty, presencePenalty } = parameters ?? {}

    if (maxTokens != null && (maxTokens < 1 || maxTokens > 4096)) {
      errors.push('maxTokens必须在1-4096之间')
    }

    if (temperature != null && (temperature < 0 || temperature > 2)) {
      errors.push('temperature必须在0-2之间')
    }

    if (topP != null && (topP < 0 || topP > 1)) {
      errors.push('topP必须在0-1之间')
    }

    if (frequencyPenalty != null && (frequencyPenalty < -2 || frequencyPenalty > 2)) {
      errors.push('frequencyPenalty必须在-2到2之间')
    }

    if (presencePenalty != null && (presencePenalty < -2 || presencePenalty > 2)) {
      errors.push('presencePenalty必须在-2到2之间')
    }

    return errors
  }

  /**
   * Estimate the cost of a completed exchange.
   * Simplified: real deployments should use provider-specific pricing.
   *
   * @param {Object} tokenUsage - { promptTokens, completionTokens }
   * @param {string} model - Model identifier (currently unused by pricing)
   * @param {string} provider - One of AI_PROVIDERS
   * @returns {{promptCost: number, completionCost: number, totalCost: number, currency: string}}
   */
  static calculateCost(tokenUsage, model, provider) {
    const costPerToken = this.getTokenCost(model, provider)

    const promptCost = (tokenUsage.promptTokens || 0) * costPerToken.input
    const completionCost = (tokenUsage.completionTokens || 0) * costPerToken.output

    return {
      promptCost,
      completionCost,
      totalCost: promptCost + completionCost,
      currency: 'USD'
    }
  }

  /**
   * Per-token pricing for a provider. Simplified static table; should
   * eventually come from configuration or the database.
   *
   * @param {string} model - Model identifier (reserved for future use)
   * @param {string} provider - One of AI_PROVIDERS
   * @returns {{input: number, output: number}} USD per token; zeros for unknown providers
   */
  static getTokenCost(model, provider) {
    const pricing = {
      [AI_PROVIDERS.SILICONFLOW]: {
        input: 0.0001,
        output: 0.0002
      },
      [AI_PROVIDERS.OPENAI]: {
        input: 0.0015,
        output: 0.002
      },
      [AI_PROVIDERS.DEEPSEEK]: {
        input: 0.0001,
        output: 0.0002
      }
    }

    return pricing[provider] || { input: 0, output: 0 }
  }

  /**
   * Streaming response handler (reserved interface).
   * Streaming is not implemented yet; delegates to the blocking call,
   * so `onChunk` is currently never invoked.
   *
   * @param {string} userId
   * @param {string} sessionId
   * @param {Object} messageData
   * @param {Function} onChunk - Reserved for future chunk callbacks
   * @returns {Promise<Object>} Same shape as sendMessage()
   */
  static async sendStreamMessage(userId, sessionId, messageData, onChunk) {
    return await this.sendMessage(userId, sessionId, messageData)
  }
}

module.exports = AiService