import logger from './logger.js'
import { config, saveConfig } from '../config/index.js'
import axios from 'axios'
import fs from 'fs/promises'
import path from 'path'
import { fileURLToPath } from 'url'

// Recreate CommonJS-style __dirname for this ES module.
const __dirname = path.dirname(fileURLToPath(import.meta.url))
// Absolute path to the persisted model configuration file.
// NOTE(review): CONFIG_FILE is never referenced in this module — config is
// saved via saveConfig() instead; confirm it is still needed before removing.
const CONFIG_FILE = path.resolve(__dirname, '../../config/model.json')

/**
 * Service wrapper around a local Ollama HTTP API: connectivity checks,
 * model listing/pulling, embeddings, and (streamed) text generation.
 */
export class ModelService {
  constructor() {
    // Use the Ollama section of the shared application config directly.
    this.config = config.ollama
    // nomic-embed-text produces fixed-size vectors of this dimension.
    this.embeddingDimension = 768
    // NOTE(review): this cache is never read or written in this module —
    // confirm whether external callers rely on it before removing.
    this.embeddingCache = new Map()
    this.isInitialized = false
  }

  /**
   * Verify connectivity and ensure the configured chat and embedding models
   * are available locally, pulling any that are missing.
   * @throws {Error} when Ollama is unreachable or a pull fails
   */
  async init() {
    try {
      logger.info('Initializing model service...')
      await this.testConnection()

      const models = await this.listModels()
      logger.info('Available models:', models.map(m => m.name))

      // NOTE(review): Ollama tag names usually carry an explicit tag such as
      // ":latest"; this exact-match check can report a present model as
      // missing if the configured name omits the tag — confirm config values.
      const chatModelExists = models.some(m => m.name === this.config.chat.model)
      if (!chatModelExists) {
        logger.info(`Default model ${this.config.chat.model} not found, pulling...`)
        await this.pullModel(this.config.chat.model)
      }

      const embeddingModelExists = models.some(m => m.name === this.config.embedding.model)
      if (!embeddingModelExists) {
        logger.info(`Embedding model ${this.config.embedding.model} not found, pulling...`)
        await this.pullModel(this.config.embedding.model)
      }

      this.isInitialized = true
      logger.info('Model service initialized successfully')
    } catch (error) {
      logger.error('Failed to initialize model service:', error)
      throw error
    }
  }

  /**
   * Check that the Ollama service answers on `endpoint`.
   * @param {string} [endpoint] - base URL; defaults to the configured endpoint
   * @returns {Promise<object>} the /api/version payload
   * @throws {Error} when the service cannot be reached
   */
  async testConnection(endpoint = this.config.endpoint) {
    try {
      // The endpoint may arrive URL-encoded (e.g. from a query string).
      endpoint = decodeURIComponent(endpoint)

      // Ensure a scheme is present so axios treats it as an absolute URL.
      if (!endpoint.startsWith('http')) {
        endpoint = `http://${endpoint}`
      }

      // Strip a trailing slash to avoid `//api/version`.
      endpoint = endpoint.replace(/\/$/, '')

      const response = await axios.get(`${endpoint}/api/version`)
      return response.data
    } catch (error) {
      logger.error('Failed to connect to Ollama:', error)
      throw new Error('Failed to connect to Ollama service')
    }
  }

  /**
   * List locally available models.
   * @returns {Promise<Array<{name: string, size: number, modified_at: string, status: string}>>}
   */
  async listModels() {
    try {
      const response = await axios.get(`${this.config.endpoint}/api/tags`)
      logger.debug(`Available models: ${JSON.stringify(response.data.models.map(m => m.name))}`)
      return response.data.models.map(model => ({
        name: model.name,
        size: model.size,
        modified_at: model.modified_at,
        // /api/tags does not report a status; treat listed models as ready.
        status: model.status || 'ready'
      }))
    } catch (error) {
      logger.error(`Failed to list models: ${error.message}`)
      throw error
    }
  }

  /**
   * Pull a model from the Ollama registry (blocking, non-streamed).
   * @param {string} name - model name, possibly URL-encoded
   * @returns {Promise<{success: boolean}>}
   */
  async pullModel(name) {
    try {
      // Names may arrive URL-encoded from route parameters.
      const decodedName = decodeURIComponent(name)
      logger.info(`Pulling model: ${decodedName}`)
      const startTime = Date.now()

      await axios.post(`${this.config.endpoint}/api/pull`, {
        name: decodedName,
        stream: false
      })

      const duration = Date.now() - startTime
      logger.info(`Model ${decodedName} pulled successfully in ${duration}ms`)
      return { success: true }
    } catch (error) {
      logger.error('Failed to pull model:', error)
      throw error
    }
  }

  /**
   * Embed many texts, issuing up to `chunkSize` requests concurrently.
   * @param {string[]} texts
   * @returns {Promise<number[][]>} one embedding per input text, in order
   */
  async generateEmbeddingBatch(texts) {
    try {
      // Guard against a missing/zero chunkSize: `i += 0` would never
      // terminate and `i += NaN` would silently return an empty result.
      const configured = this.config.embedding.parameters.chunkSize
      const batchSize = Number.isInteger(configured) && configured > 0
        ? configured
        : Math.max(texts.length, 1)
      const results = []

      for (let i = 0; i < texts.length; i += batchSize) {
        const batch = texts.slice(i, i + batchSize)
        const embeddings = await Promise.all(batch.map(text => this.generateEmbedding(text)))
        results.push(...embeddings)
      }

      return results
    } catch (error) {
      logger.error('Failed to generate embeddings batch:', error)
      throw error
    }
  }

  /**
   * Embed a single text with the configured embedding model.
   * @param {string} text
   * @returns {Promise<number[]>}
   * @throws {Error} when the API returns no embedding
   */
  async generateEmbedding(text) {
    try {
      if (!this.isInitialized) {
        await this.init()
      }

      const response = await axios.post(`${this.config.endpoint}/api/embeddings`, {
        model: this.config.embedding.model,
        prompt: text
      })

      if (!response.data?.embedding) {
        throw new Error('No embedding returned from API')
      }

      return response.data.embedding
    } catch (error) {
      logger.error('Failed to generate embedding:', {
        error: error.message,
        model: this.config.embedding.model,
        decodedModel: decodeURIComponent(this.config.embedding.model)
      })
      throw error
    }
  }

  // POST /api/generate with streaming enabled. Returns the axios response
  // whose `data` is a Node readable stream of newline-delimited JSON.
  #requestGeneration(prompt, context) {
    return axios.post(`${this.config.endpoint}/api/generate`, {
      model: this.config.chat.model,
      prompt: context ? `${context}\n\n${prompt}` : prompt,
      stream: true,
      options: {
        temperature: this.config.chat.parameters.temperature,
        top_p: this.config.chat.parameters.top_p,
        top_k: this.config.chat.parameters.top_k,
        // NOTE(review): Ollama's generate option for output length is
        // `num_predict`; confirm the server honors `max_tokens` here.
        max_tokens: this.config.chat.parameters.max_tokens
      }
    }, {
      responseType: 'stream'
    })
  }

  // Yield the `response` field of each JSON line in an Ollama NDJSON stream.
  // A network chunk may contain several lines or end mid-line, so partial
  // lines are buffered across chunks instead of being parsed (and dropped)
  // as invalid JSON.
  async *#streamTokens(stream) {
    let buffer = ''
    const emit = (line) => {
      try {
        const data = JSON.parse(line)
        return data.response || null
      } catch (e) {
        logger.warn('Failed to parse stream chunk:', {
          error: e.message,
          chunk: line
        })
        return null
      }
    }

    for await (const chunk of stream) {
      buffer += chunk.toString()
      const lines = buffer.split('\n')
      buffer = lines.pop() // keep trailing partial line for the next chunk
      for (const line of lines) {
        if (!line.trim()) continue
        const token = emit(line)
        if (token) yield token
      }
    }

    // Flush whatever remains after the stream ends.
    if (buffer.trim()) {
      const token = emit(buffer)
      if (token) yield token
    }
  }

  /**
   * Generate a complete (non-streamed to the caller) chat response.
   * @param {string} prompt
   * @param {string} [context] - optional context prepended to the prompt
   * @returns {Promise<string>} the full, trimmed response text
   * @throws {Error} when no text is produced
   */
  async generateResponse(prompt, context = '') {
    try {
      if (!this.isInitialized) {
        await this.init()
      }

      logger.debug('Generating response with:', {
        model: this.config.chat.model,
        prompt: prompt.substring(0, 100) + '...',
        parameters: {
          temperature: this.config.chat.parameters.temperature,
          top_p: this.config.chat.parameters.top_p,
          top_k: this.config.chat.parameters.top_k,
          max_tokens: this.config.chat.parameters.max_tokens
        }
      })

      const response = await this.#requestGeneration(prompt, context)

      let fullResponse = ''
      for await (const token of this.#streamTokens(response.data)) {
        fullResponse += token
      }

      if (!fullResponse) {
        throw new Error('No response generated')
      }

      return fullResponse.trim()
    } catch (error) {
      logger.error('Failed to generate response:', {
        error: error.message,
        model: this.config.chat.model,
        parameters: this.config.chat.parameters
      })
      throw error
    }
  }

  /**
   * Generate a chat response as an async stream of text tokens.
   * @param {string} prompt
   * @param {string} [context] - optional context prepended to the prompt
   * @yields {string} incremental response fragments
   */
  async *generateResponseStream(prompt, context = '') {
    try {
      if (!this.isInitialized) {
        await this.init()
      }

      logger.debug('Generating response with:', {
        model: this.config.chat.model,
        prompt: prompt.substring(0, 100) + '...',
        parameters: {
          temperature: this.config.chat.parameters.temperature,
          top_p: this.config.chat.parameters.top_p,
          top_k: this.config.chat.parameters.top_k,
          max_tokens: this.config.chat.parameters.max_tokens
        }
      })

      const response = await this.#requestGeneration(prompt, context)
      yield* this.#streamTokens(response.data)
    } catch (error) {
      logger.error('Failed to generate stream response:', {
        error: error.message,
        model: this.config.chat.model,
        parameters: this.config.chat.parameters
      })
      throw error
    }
  }

  /**
   * Snapshot of the current Ollama configuration (read from the live
   * global config, not the possibly-stale local reference).
   * @returns {{endpoint: string, chat: object, embedding: object}}
   */
  getConfig() {
    return {
      endpoint: config.ollama.endpoint,
      chat: {
        model: config.ollama.chat.model,
        parameters: config.ollama.chat.parameters
      },
      embedding: {
        model: config.ollama.embedding.model,
        parameters: config.ollama.embedding.parameters
      }
    }
  }

  /**
   * Merge `newConfig` over the current global config and persist it.
   * Fields omitted from `newConfig` keep their existing values, so partial
   * updates are safe.
   * @param {object} newConfig - partial { endpoint, chat, embedding } object
   * @returns {Promise<object>} the resulting configuration (see getConfig)
   */
  async updateConfig(newConfig) {
    try {
      const updatedConfig = {
        ...config,
        ollama: {
          ...config.ollama,
          endpoint: newConfig.endpoint ?? config.ollama.endpoint,
          chat: {
            model: newConfig.chat?.model ?? config.ollama.chat.model,
            parameters: {
              ...config.ollama.chat.parameters,
              ...newConfig.chat?.parameters
            }
          },
          embedding: {
            model: newConfig.embedding?.model ?? config.ollama.embedding.model,
            parameters: {
              ...config.ollama.embedding.parameters,
              ...newConfig.embedding?.parameters
            }
          }
        }
      }

      // Persist via the shared saveConfig helper.
      await saveConfig(updatedConfig)

      // Refresh the local reference. NOTE(review): this assumes saveConfig
      // updates the exported `config` object in place — confirm.
      this.config = config.ollama

      logger.info('Model config updated and saved')
      return this.getConfig()
    } catch (error) {
      logger.error('Failed to update model config:', error)
      throw error
    }
  }

  /**
   * Public entry point used at module load: re-reads the global config and
   * runs init().
   */
  async initialize() {
    try {
      // Make sure we are working off the latest global configuration.
      this.config = config.ollama

      await this.init()

      logger.info('Model service initialized with configuration')
    } catch (error) {
      logger.error('Failed to initialize model service:', error)
      throw error
    }
  }

  /**
   * Report readiness of the configured chat and embedding models.
   * Never throws: on failure it returns `connected: false` with both
   * models marked not ready.
   * @returns {Promise<object>} status shaped as
   *   { connected, chat: {model, ready, loading}, embedding: {model, ready, loading} }
   */
  async getModelStatus() {
    const chatModel = this.config.chat.model
    const embeddingModel = this.config.embedding.model

    try {
      const status = {
        connected: true,
        chat: {
          model: chatModel,
          ready: false,
          loading: false
        },
        embedding: {
          model: embeddingModel,
          ready: false,
          loading: false
        }
      }

      const models = await this.listModels()

      for (const model of models) {
        if (model.name === chatModel) {
          status.chat.ready = model.status === 'ready'
          status.chat.loading = model.status === 'loading'
        }
        if (model.name === embeddingModel) {
          status.embedding.ready = model.status === 'ready'
          status.embedding.loading = model.status === 'loading'
        }
      }

      return status
    } catch (error) {
      logger.error('Failed to get model status:', error)
      // Keep the same shape as the success path (including model names)
      // so consumers can render the failure state consistently.
      return {
        connected: false,
        chat: { model: chatModel, ready: false, loading: false },
        embedding: { model: embeddingModel, ready: false, loading: false }
      }
    }
  }
}

// Create the shared singleton and kick off initialization at import time.
// The promise is intentionally fire-and-forget: failures are logged rather
// than crashing module load, and methods lazily re-run init() when
// isInitialized is still false.
export const modelService = new ModelService()
modelService.initialize().catch(error => {
  logger.error('Failed to initialize model service:', error)
})