/**
 * AI Provider Adapter - 统一不同AI服务的接口
 * 基于ai-writer-lite项目的实现，适配IntelliMark项目需求
 */

import { BaseLanguageModel } from "@langchain/core/language_models/base"
import { BaseMessage, HumanMessage, AIMessage, SystemMessage } from "@langchain/core/messages"
import { ChatOpenAI } from "@langchain/openai"
import { ChatAnthropic } from "@langchain/anthropic"

/** Identifiers for the AI backends this module can adapt. */
export type AIProvider = 'openai' | 'anthropic' | 'deepseek' | 'local' | 'longcat'

/**
 * Per-request generation options. Unset fields fall back to the adapter's
 * ProviderConfig, and then to the adapter's built-in defaults.
 */
export interface AIRequestOptions {
  temperature?: number
  maxTokens?: number
  timeout?: number
  retries?: number
  stream?: boolean
}

/**
 * Streaming options with optional lifecycle callbacks.
 * NOTE(review): not consumed by the adapters visible in this file — presumably
 * used by higher-level callers; verify before removing.
 */
export interface AIStreamOptions extends AIRequestOptions {
  /** Invoked for each incremental text chunk. */
  onProgress?: (chunk: string) => void
  /** Invoked when streaming fails. */
  onError?: (error: Error) => void
  /** Invoked once with the fully concatenated result. */
  onComplete?: (result: string) => void
}

/** Normalized result returned by every provider adapter. */
export interface AIResponse {
  content: string
  /** Token accounting; absent when the provider reports no usage (e.g. local). */
  usage?: TokenUsage
  model: string
  provider: AIProvider
  /** Provider-specific extras (finish/stop reason, etc.). */
  metadata?: Record<string, any>
}

/** Token accounting for a single request. */
export interface TokenUsage {
  promptTokens: number
  completionTokens: number
  totalTokens: number
  /** Estimated cost in USD, when the adapter can price the model. */
  cost?: number
}

/** Static configuration used to construct a provider adapter. */
export interface ProviderConfig {
  apiKey: string
  /** Override endpoint; required for the 'local' provider, optional elsewhere. */
  baseURL?: string
  model: string
  temperature?: number
  maxTokens?: number
  timeout?: number
  retries?: number
}

/**
 * Abstract base class for AI provider adapters.
 *
 * Subclasses wrap a concrete LangChain chat model (OpenAI, Anthropic, ...)
 * behind a uniform request / stream / cost interface so callers can switch
 * providers without changing call sites.
 */
export abstract class BaseAIProviderAdapter {
  protected config: ProviderConfig
  protected provider: AIProvider

  constructor(provider: AIProvider, config: ProviderConfig) {
    this.provider = provider
    this.config = config
  }

  /** Build (and, in subclasses, cache) the underlying LangChain model. */
  abstract createModel(options?: AIRequestOptions): BaseLanguageModel
  /** Run a single non-streaming completion over the given messages. */
  abstract generateResponse(messages: BaseMessage[], options?: AIRequestOptions): Promise<AIResponse>
  /** Stream a completion, yielding incremental text chunks. */
  abstract generateStream(messages: BaseMessage[], options?: AIRequestOptions): AsyncGenerator<string>
  /** Estimate the USD cost of the given token usage. */
  abstract calculateCost(usage: TokenUsage): number

  /**
   * Convert LangChain message objects into the plain `{role, content}` shape
   * used by OpenAI-compatible HTTP APIs. HumanMessage and any unrecognized
   * message subclass map to the 'user' role rather than being dropped.
   */
  protected formatMessages(messages: BaseMessage[]): Array<{ role: string; content: unknown }> {
    return messages.map(msg => {
      if (msg instanceof AIMessage) {
        return { role: 'assistant', content: msg.content }
      }
      if (msg instanceof SystemMessage) {
        return { role: 'system', content: msg.content }
      }
      // HumanMessage and unknown subclasses fall through to 'user'.
      return { role: 'user', content: msg.content }
    })
  }

  /**
   * Normalize provider/transport errors into user-facing Error objects.
   * Always throws; the Promise<never> return type lets callers write
   * `return this.handleError(e)` while keeping their declared return type.
   *
   * @param error - the raw value caught from the underlying SDK (typed
   *   `unknown` for strict-mode safety; narrowed below before use).
   * @throws Error with a provider-tagged message.
   */
  protected async handleError(error: unknown): Promise<never> {
    console.error(`AI Provider Error (${this.provider}):`, error)

    // HTTP-style SDK errors carry a numeric `status`; network errors a `code`.
    const status = (error as { status?: number } | null)?.status
    const code = (error as { code?: string } | null)?.code
    const message = (error as { message?: string } | null)?.message

    if (status === 401) {
      throw new Error(`API密钥无效或已过期 (${this.provider})`)
    } else if (status === 429) {
      throw new Error(`请求频率超限，请稍后重试 (${this.provider})`)
    } else if (status === 500) {
      throw new Error(`服务器内部错误，请稍后重试 (${this.provider})`)
    } else if (code === 'ECONNABORTED') {
      throw new Error(`请求超时 (${this.provider})`)
    } else if (message?.includes('ENOTFOUND')) {
      throw new Error(`网络连接失败，请检查网络设置 (${this.provider})`)
    }

    throw new Error(`AI服务调用失败: ${message || error}`)
  }

  /** The provider identifier this adapter was constructed with. */
  public getProvider(): AIProvider {
    return this.provider
  }

  /** A shallow defensive copy of the adapter's configuration. */
  public getConfig(): ProviderConfig {
    return { ...this.config }
  }
}

export class OpenAIProviderAdapter extends BaseAIProviderAdapter {
  private model: ChatOpenAI | null = null

  constructor(config: ProviderConfig) {
    super('openai', config)
  }

  createModel(options: AIRequestOptions = {}): ChatOpenAI {
    this.model = new ChatOpenAI({
      modelName: this.config.model,
      openAIApiKey: this.config.apiKey,
      temperature: options.temperature ?? this.config.temperature ?? 0.7,
      maxTokens: options.maxTokens ?? this.config.maxTokens ?? 2000,
      timeout: options.timeout ?? this.config.timeout ?? 60000,
      maxRetries: options.retries ?? this.config.retries ?? 3,
      streaming: options.stream ?? false,
    })

    return this.model
  }

  async generateResponse(messages: BaseMessage[], options: AIRequestOptions = {}): Promise<AIResponse> {
    try {
      const model = this.model || this.createModel(options)
      const response = await model.invoke(messages)

      return {
        content: response.content as string,
        usage: (response as any).usage ? {
          promptTokens: (response as any).usage.prompt_tokens,
          completionTokens: (response as any).usage.completion_tokens,
          totalTokens: (response as any).usage.total_tokens,
          cost: this.calculateCost((response as any).usage),
        } : undefined,
        model: this.config.model,
        provider: this.provider,
        metadata: {
          finishReason: (response as any).response_metadata?.finish_reason,
        },
      }
    } catch (error) {
      return this.handleError(error)
    }
  }

  async* generateStream(messages: BaseMessage[], options: AIRequestOptions = {}): AsyncGenerator<string> {
    try {
      const model = this.createModel({ ...options, stream: true })
      const stream = await model.stream(messages)

      for await (const chunk of stream) {
        yield chunk.content as string
      }
    } catch (error) {
      await this.handleError(error)
    }
  }

  calculateCost(usage: TokenUsage): number {
    // OpenAI GPT-4 定价
    const promptPrice = 0.03 / 1000
    const completionPrice = 0.06 / 1000

    return (usage.promptTokens * promptPrice) + (usage.completionTokens * completionPrice)
  }
}

export class AnthropicProviderAdapter extends BaseAIProviderAdapter {
  private model: ChatAnthropic | null = null

  constructor(config: ProviderConfig) {
    super('anthropic', config)
  }

  createModel(options: AIRequestOptions = {}): ChatAnthropic {
    this.model = new ChatAnthropic({
      modelName: this.config.model,
      anthropicApiKey: this.config.apiKey,
      temperature: options.temperature ?? this.config.temperature ?? 0.7,
      maxTokens: options.maxTokens ?? this.config.maxTokens ?? 2000,
      maxRetries: options.retries ?? this.config.retries ?? 3,
      streaming: options.stream ?? false,
      anthropicApiUrl: this.config.baseURL,
    })

    return this.model
  }

  async generateResponse(messages: BaseMessage[], options: AIRequestOptions = {}): Promise<AIResponse> {
    try {
      const model = this.model || this.createModel(options)
      const response = await model.invoke(messages)

      return {
        content: response.content as string,
        usage: (response as any).usage ? {
          promptTokens: (response as any).usage.input_tokens,
          completionTokens: (response as any).usage.output_tokens,
          totalTokens: (response as any).usage.input_tokens + (response as any).usage.output_tokens,
          cost: this.calculateCost((response as any).usage),
        } : undefined,
        model: this.config.model,
        provider: this.provider,
        metadata: {
          stopReason: (response as any).response_metadata?.stop_reason,
        },
      }
    } catch (error) {
      return this.handleError(error)
    }
  }

  async* generateStream(messages: BaseMessage[], options: AIRequestOptions = {}): AsyncGenerator<string> {
    try {
      const model = this.createModel({ ...options, stream: true })
      const stream = await model.stream(messages)

      for await (const chunk of stream) {
        yield chunk.content as string
      }
    } catch (error) {
      await this.handleError(error)
    }
  }

  calculateCost(usage: TokenUsage): number {
    // Claude 3 Sonnet 定价
    const promptPrice = 0.003 / 1000
    const completionPrice = 0.015 / 1000

    return (usage.promptTokens * promptPrice) + (usage.completionTokens * completionPrice)
  }
}

export class DeepSeekProviderAdapter extends BaseAIProviderAdapter {
  private model: ChatOpenAI | null = null

  constructor(config: ProviderConfig) {
    super('deepseek', config)
  }

  createModel(options: AIRequestOptions = {}): ChatOpenAI {
    this.model = new ChatOpenAI({
      modelName: this.config.model,
      openAIApiKey: this.config.apiKey,
      temperature: options.temperature ?? this.config.temperature ?? 0.7,
      maxTokens: options.maxTokens ?? this.config.maxTokens ?? 2000,
      timeout: options.timeout ?? this.config.timeout ?? 60000,
      maxRetries: options.retries ?? this.config.retries ?? 3,
      streaming: options.stream ?? false,
      configuration: {
        baseURL: this.config.baseURL || 'https://api.deepseek.com/v1',
      },
    })

    return this.model
  }

  async generateResponse(messages: BaseMessage[], options: AIRequestOptions = {}): Promise<AIResponse> {
    try {
      const model = this.model || this.createModel(options)
      const response = await model.invoke(messages)

      return {
        content: response.content as string,
        usage: (response as any).usage ? {
          promptTokens: (response as any).usage.prompt_tokens,
          completionTokens: (response as any).usage.completion_tokens,
          totalTokens: (response as any).usage.total_tokens,
          cost: this.calculateCost((response as any).usage),
        } : undefined,
        model: this.config.model,
        provider: this.provider,
      }
    } catch (error) {
      return this.handleError(error)
    }
  }

  async* generateStream(messages: BaseMessage[], options: AIRequestOptions = {}): AsyncGenerator<string> {
    try {
      const model = this.createModel({ ...options, stream: true })
      const stream = await model.stream(messages)

      for await (const chunk of stream) {
        yield chunk.content as string
      }
    } catch (error) {
      await this.handleError(error)
    }
  }

  calculateCost(usage: TokenUsage): number {
    // DeepSeek 定价
    const promptPrice = 0.001 / 1000
    const completionPrice = 0.002 / 1000

    return (usage.promptTokens * promptPrice) + (usage.completionTokens * completionPrice)
  }
}

/**
 * Adapter for locally hosted, OpenAI-compatible model servers (e.g. Ollama).
 * Requires `baseURL` in the config; uses a placeholder API key and reports
 * zero cost since no usage is billed.
 */
export class LocalProviderAdapter extends BaseAIProviderAdapter {
  private model: ChatOpenAI | null = null

  constructor(config: ProviderConfig) {
    super('local', config)
  }

  /**
   * Create (and cache) the client. Per-call options override the adapter
   * config. Throws when no baseURL is configured.
   */
  createModel(options: AIRequestOptions = {}): ChatOpenAI {
    if (!this.config.baseURL) {
      throw new Error('本地模型需要配置baseURL (例如: http://localhost:11434)')
    }

    const client = new ChatOpenAI({
      modelName: this.config.model,
      openAIApiKey: 'not-required', // placeholder: Ollama ignores the API key
      temperature: options.temperature ?? this.config.temperature ?? 0.7,
      maxTokens: options.maxTokens ?? this.config.maxTokens ?? 2000,
      timeout: options.timeout ?? this.config.timeout ?? 60000,
      maxRetries: options.retries ?? this.config.retries ?? 1, // local servers: fewer retries
      streaming: options.stream ?? false,
      configuration: {
        baseURL: this.config.baseURL,
      },
    })

    this.model = client
    return client
  }

  /** Non-streaming completion; local responses carry no usage accounting. */
  async generateResponse(messages: BaseMessage[], options: AIRequestOptions = {}): Promise<AIResponse> {
    try {
      const client = this.model || this.createModel(options)
      const reply = await client.invoke(messages)

      return {
        content: reply.content as string,
        model: this.config.model,
        provider: this.provider,
      }
    } catch (error) {
      return this.handleError(error)
    }
  }

  /** Streaming completion; yields text chunks as they arrive. */
  async* generateStream(messages: BaseMessage[], options: AIRequestOptions = {}): AsyncGenerator<string> {
    try {
      const streamingClient = this.createModel({ ...options, stream: true })
      for await (const piece of await streamingClient.stream(messages)) {
        yield piece.content as string
      }
    } catch (error) {
      await this.handleError(error)
    }
  }

  /** Running locally costs nothing. */
  calculateCost(usage: TokenUsage): number {
    return 0
  }
}

export class LongCatProviderAdapter extends BaseAIProviderAdapter {
  private model: ChatOpenAI | null = null

  constructor(config: ProviderConfig) {
    super('longcat', config)
  }

  createModel(options: AIRequestOptions = {}): ChatOpenAI {
    this.model = new ChatOpenAI({
      modelName: this.config.model,
      openAIApiKey: this.config.apiKey,
      temperature: options.temperature ?? this.config.temperature ?? 0.7,
      maxTokens: options.maxTokens ?? this.config.maxTokens ?? 2000,
      timeout: options.timeout ?? this.config.timeout ?? 60000,
      maxRetries: options.retries ?? this.config.retries ?? 3,
      streaming: options.stream ?? false,
      configuration: {
        baseURL: this.config.baseURL || 'https://api.longcat.chat/openai',
      },
    })

    return this.model
  }

  async generateResponse(messages: BaseMessage[], options: AIRequestOptions = {}): Promise<AIResponse> {
    try {
      const model = this.model || this.createModel(options)
      const response = await model.invoke(messages)

      return {
        content: response.content as string,
        usage: (response as any).usage ? {
          promptTokens: (response as any).usage.prompt_tokens,
          completionTokens: (response as any).usage.completion_tokens,
          totalTokens: (response as any).usage.total_tokens,
          cost: this.calculateCost((response as any).usage),
        } : undefined,
        model: this.config.model,
        provider: this.provider,
      }
    } catch (error) {
      return this.handleError(error)
    }
  }

  async* generateStream(messages: BaseMessage[], options: AIRequestOptions = {}): AsyncGenerator<string> {
    try {
      const model = this.createModel({ ...options, stream: true })
      const stream = await model.stream(messages)

      for await (const chunk of stream) {
        yield chunk.content as string
      }
    } catch (error) {
      await this.handleError(error)
    }
  }

  calculateCost(usage: TokenUsage): number {
    // LongCat 定价: 每日10万token免费额度，超出部分按量计费
    // 这里使用近似定价，实际以官方为准
    const promptPrice = 0.0005 / 1000  // 非常优惠的定价
    const completionPrice = 0.001 / 1000

    return (usage.promptTokens * promptPrice) + (usage.completionTokens * completionPrice)
  }
}

/**
 * Registry of named provider adapters with a configurable default, plus
 * convenience pass-throughs for one-shot and streaming generation.
 */
export class AIProviderManager {
  private providers: Map<string, BaseAIProviderAdapter> = new Map()
  private defaultProvider: string = 'openai'

  /** Register (or replace) an adapter under the given name. */
  public registerProvider(name: string, adapter: BaseAIProviderAdapter): void {
    this.providers.set(name, adapter)
  }

  /**
   * Look up an adapter by name, falling back to the default provider name
   * when none is given.
   * @throws Error when no adapter is registered under the resolved name.
   */
  public getProvider(name?: string): BaseAIProviderAdapter {
    const key = name || this.defaultProvider
    const adapter = this.providers.get(key)
    if (adapter === undefined) {
      throw new Error(`Provider not found: ${key}`)
    }
    return adapter
  }

  /** Make `name` the default provider; it must already be registered. */
  public setDefaultProvider(name: string): void {
    if (!this.providers.has(name)) {
      throw new Error(`Cannot set default provider: ${name} not registered`)
    }
    this.defaultProvider = name
  }

  /** Names of all registered providers, in registration order. */
  public listProviders(): string[] {
    return [...this.providers.keys()]
  }

  /** One-shot generation via the named (or default) provider. */
  public async generateResponse(
    messages: BaseMessage[],
    providerName?: string,
    options?: AIRequestOptions
  ): Promise<AIResponse> {
    const adapter = this.getProvider(providerName)
    return adapter.generateResponse(messages, options)
  }

  /** Streaming generation via the named (or default) provider. */
  public async* generateStream(
    messages: BaseMessage[],
    providerName?: string,
    options?: AIRequestOptions
  ): AsyncGenerator<string> {
    const adapter = this.getProvider(providerName)
    yield* adapter.generateStream(messages, options)
  }
}