import type { ApiResponse, Interceptor, OllamaRequest, OllamaResponse } from '@/types/ollama'

/**
 * Thin client for the Ollama HTTP API (`/api/generate`).
 *
 * Supports request/response/error interceptors and handles Ollama's
 * newline-delimited-JSON (NDJSON) responses, including servers that
 * stream multiple chunks even when `stream: false` was requested.
 */
export class OllamaService {
  private readonly baseURL: string
  private interceptors: Interceptor<OllamaRequest, OllamaResponse>[] = []

  constructor(baseURL: string = '/ollama') {
    this.baseURL = baseURL
  }

  /** Registers an interceptor; interceptors run in registration order. */
  addInterceptor(interceptor: Interceptor<OllamaRequest, OllamaResponse>) {
    this.interceptors.push(interceptor)
  }

  /**
   * Generates text non-streaming (forces `stream: false`).
   *
   * Even with `stream: false` the server may send several NDJSON chunks,
   * so all `response` fragments are accumulated and the last chunk's
   * metadata is used for the final result.
   *
   * @param request Generation request; its `stream` flag is overridden.
   * @returns `code: 200` with the merged response on success, or
   *          `code: 500` with the (intercepted) error message on failure.
   */
  async generate(request: OllamaRequest): Promise<ApiResponse<OllamaResponse>> {
    try {
      // Ensure stream is explicitly false for this endpoint logic.
      const finalPayload = await this.executeRequestInterceptors({
        ...request,
        stream: false,
      })

      const response = await this.postGenerate(finalPayload)

      let accumulatedResponse = ''
      let lastChunk: null | OllamaResponse = null

      for await (const parsedChunk of this.readJsonLines(response)) {
        if (parsedChunk.response) {
          accumulatedResponse += parsedChunk.response
        }
        // Keep the last chunk: it carries the final metadata (timings, context).
        lastChunk = parsedChunk
        if (parsedChunk.done) {
          break
        }
      }

      if (!lastChunk) {
        throw new Error('No response chunks received from Ollama.')
      }

      // Construct the final response object from the last chunk's metadata
      // plus the accumulated text.
      const finalData: OllamaResponse = {
        ...lastChunk,
        response: accumulatedResponse,
        done: true,
      }

      const interceptedData = await this.executeResponseInterceptors(finalData)
      return { code: 200, message: 'Success', data: interceptedData, timestamp: Date.now() }
    } catch (error) {
      // Run error interceptors, then surface the failure as an ApiResponse.
      const interceptedError = await this.executeErrorInterceptors(error as Error)

      return {
        code: 500,
        message: interceptedError.message,
        data: null,
        timestamp: Date.now(),
      }
    }
  }

  /**
   * Generates text with streaming (forces `stream: true`), invoking
   * `onChunk` for every parsed (and response-intercepted) NDJSON chunk.
   *
   * @throws The error produced by the error-interceptor chain on failure.
   */
  async generateStream(request: OllamaRequest, onChunk: (chunk: OllamaResponse) => void): Promise<void> {
    try {
      const interceptedRequest = await this.executeRequestInterceptors({
        ...request,
        stream: true,
      })

      const response = await this.postGenerate(interceptedRequest)

      for await (const chunk of this.readJsonLines(response)) {
        const interceptedChunk = await this.executeResponseInterceptors(chunk)
        onChunk(interceptedChunk)
      }
    } catch (error) {
      throw await this.executeErrorInterceptors(error as Error)
    }
  }

  /** Returns the base URL this service was constructed with. */
  getBaseUrl(): string {
    return this.baseURL
  }

  /** Removes a previously registered interceptor (no-op if not found). */
  removeInterceptor(interceptor: Interceptor<OllamaRequest, OllamaResponse>) {
    const index = this.interceptors.indexOf(interceptor)
    if (index !== -1) {
      this.interceptors.splice(index, 1)
    }
  }

  /**
   * POSTs a payload to `/api/generate` and validates the HTTP status,
   * reading the error body (best-effort) for a useful error message.
   */
  private async postGenerate(payload: OllamaRequest): Promise<Response> {
    const response = await fetch(`${this.baseURL}/api/generate`, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload),
    })

    if (!response.ok) {
      let errorBody = ''
      try {
        errorBody = await response.text()
      } catch {
        // Best-effort: the error body may be unreadable; fall back to statusText.
      }
      throw new Error(`HTTP error! status: ${response.status}, message: ${errorBody || response.statusText}`)
    }

    return response
  }

  /**
   * Parses an NDJSON response body chunk by chunk.
   *
   * Buffers the trailing partial line between reads (a JSON object may be
   * split across network chunks) and decodes with `{ stream: true }` so
   * multi-byte UTF-8 sequences split across chunks are handled correctly.
   *
   * @throws Error when a complete line fails to parse as JSON.
   */
  private async *readJsonLines(response: Response): AsyncGenerator<OllamaResponse, void, void> {
    const reader = response.body?.getReader()
    if (!reader) {
      throw new Error('Failed to get response reader')
    }

    const decoder = new TextDecoder()
    let buffer = ''

    try {
      while (true) {
        const { done, value } = await reader.read()
        if (done) break

        buffer += decoder.decode(value, { stream: true })
        const lines = buffer.split('\n')
        // The last element may be an incomplete line; carry it over.
        buffer = lines.pop() ?? ''

        for (const line of lines) {
          if (line.trim() === '') continue
          yield this.parseChunkLine(line)
        }
      }

      // Flush any bytes still held by the decoder, then parse the tail line.
      buffer += decoder.decode()
      if (buffer.trim() !== '') {
        yield this.parseChunkLine(buffer)
      }
    } finally {
      reader.releaseLock()
    }
  }

  /** Parses one NDJSON line, logging and rethrowing with context on failure. */
  private parseChunkLine(line: string): OllamaResponse {
    try {
      return JSON.parse(line) as OllamaResponse
    } catch (parseError) {
      console.error('[OllamaService] Error parsing streamed JSON line:', parseError)
      console.error('[OllamaService] Problematic line:', line)
      throw new Error(`Failed to parse chunk: ${line.slice(0, 100)}...`)
    }
  }

  /** Runs the error-interceptor chain, threading the error through each. */
  private async executeErrorInterceptors(error: Error): Promise<Error> {
    let result = error
    for (const interceptor of this.interceptors) {
      if (interceptor.onError) {
        result = await interceptor.onError(result)
      }
    }
    return result
  }

  /** Runs the request-interceptor chain, threading the config through each. */
  private async executeRequestInterceptors(config: OllamaRequest): Promise<OllamaRequest> {
    let result = config
    for (const interceptor of this.interceptors) {
      if (interceptor.onRequest) {
        result = await interceptor.onRequest(result)
      }
    }
    return result
  }

  /** Runs the response-interceptor chain, threading the response through each. */
  private async executeResponseInterceptors(response: OllamaResponse): Promise<OllamaResponse> {
    let result = response
    for (const interceptor of this.interceptors) {
      if (interceptor.onResponse) {
        result = await interceptor.onResponse(result)
      }
    }
    return result
  }
}
