import { IModelService } from './interfaces/IModelService';
import { ICacheService } from './interfaces/ICacheService';
import { ModelInfo, LLMProvider, LLMAdapter } from '../types/index';
import { OpenAIAdapter } from './adapters/OpenAIAdapter';
import { AnthropicAdapter } from './adapters/AnthropicAdapter';
import { OpenRouterAdapter } from './adapters/OpenRouterAdapter';
import { OpenAICompatibleAdapter } from './adapters/OpenAICompatibleAdapter';

export class ModelService implements IModelService {
  private adapters: Map<LLMProvider, LLMAdapter>;
  private cacheService: ICacheService;
  // In-memory mirror of every model list successfully loaded in this session,
  // keyed by the same cache key as the persistent cache. It exists so the
  // synchronous getCachedModels() (interface contract) can return real data,
  // and so a failed refresh can fall back to the last known-good list.
  private memoryCache: Map<string, ModelInfo[]> = new Map();
  private readonly CACHE_EXPIRY_MS = 24 * 60 * 60 * 1000; // 24 hours

  constructor(cacheService: ICacheService) {
    this.cacheService = cacheService;
    this.adapters = new Map();
    this.initializeAdapters();
  }

  /** Register one adapter per supported provider. */
  private initializeAdapters(): void {
    this.adapters.set('openai', new OpenAIAdapter());
    this.adapters.set('anthropic', new AnthropicAdapter());
    this.adapters.set('openrouter', new OpenRouterAdapter());
    this.adapters.set('openai-compatible', new OpenAICompatibleAdapter());
  }

  /**
   * Single source of truth for cache keys.
   * (Previously getModels and clearCache built keys with different formats,
   * so clearCache could never delete a real entry.)
   */
  private buildCacheKey(provider: LLMProvider, baseURL?: string): string {
    return `models_${provider}_${baseURL ?? 'default'}`;
  }

  /**
   * Return the model list for a provider.
   *
   * @param provider     Which LLM provider to query.
   * @param forceRefresh Skip the cache and hit the adapter directly.
   * @param config       Optional endpoint/credentials forwarded to the adapter.
   * @returns Cached or freshly fetched models; on fetch failure, the last
   *          known-good list from this session, or [] if none exists.
   * @throws Error when the provider has no registered adapter.
   */
  async getModels(
    provider: LLMProvider,
    forceRefresh = false,
    config?: { baseURL?: string; apiKey?: string }
  ): Promise<ModelInfo[]> {
    const cacheKey = this.buildCacheKey(provider, config?.baseURL);

    // Unless a refresh is forced, serve from the persistent cache first.
    if (!forceRefresh) {
      const cached = await this.cacheService.get<ModelInfo[]>(cacheKey);
      if (cached) {
        this.memoryCache.set(cacheKey, cached);
        return cached;
      }
    }

    const adapter = this.adapters.get(provider);
    if (!adapter) {
      throw new Error(`不支持的LLM供应商: ${provider}`);
    }

    try {
      // Fetch from the adapter, forwarding the optional endpoint/credentials.
      const models = await adapter.fetchModels(config?.baseURL, config?.apiKey);

      // Persist and mirror the result.
      await this.cacheService.set(cacheKey, models, this.CACHE_EXPIRY_MS);
      this.memoryCache.set(cacheKey, models);

      return models;
    } catch (error) {
      console.error(`获取${provider}模型列表失败:`, error);

      // On failure, fall back to the last list seen in this session —
      // prefer the exact key, then any entry for this provider.
      const stale = this.memoryCache.get(cacheKey) ?? this.getCachedModels(provider);
      if (stale && stale.length > 0) {
        return stale;
      }

      // Nothing cached: degrade to an empty list rather than throwing.
      return [];
    }
  }

  /**
   * Force-refresh the model list for a provider (bypasses the cache).
   */
  async refreshModels(
    provider: LLMProvider,
    config?: { baseURL?: string; apiKey?: string }
  ): Promise<void> {
    await this.getModels(provider, true, config);
  }

  /**
   * Synchronously return the most recently loaded models for a provider.
   *
   * The interface requires a synchronous method, so only the in-memory
   * mirror is consulted (the async cacheService cannot be awaited here).
   * Returns the first entry found for the provider, regardless of baseURL,
   * or null when nothing has been loaded this session.
   */
  getCachedModels(provider: LLMProvider): ModelInfo[] | null {
    const prefix = `models_${provider}_`;
    for (const [key, models] of this.memoryCache) {
      if (key.startsWith(prefix)) {
        return models;
      }
    }
    return null;
  }

  /**
   * Clear cached models for a provider.
   *
   * @param provider Provider whose cache entries to remove.
   * @param baseURL  Optional endpoint whose entry to target; the default-key
   *                 entry and every key seen this session are also removed.
   */
  async clearCache(provider: LLMProvider, baseURL?: string): Promise<void> {
    const keys = new Set<string>([this.buildCacheKey(provider, baseURL)]);
    // Also remove every key observed for this provider in this session
    // (entries created with custom baseURLs).
    const prefix = `models_${provider}_`;
    for (const key of this.memoryCache.keys()) {
      if (key.startsWith(prefix)) {
        keys.add(key);
      }
    }
    for (const key of keys) {
      this.memoryCache.delete(key);
      await this.cacheService.delete(key);
    }
  }

  /** Clear cached models for every supported provider. */
  async clearAllCache(): Promise<void> {
    for (const provider of this.getSupportedProviders()) {
      await this.clearCache(provider);
    }
  }

  /**
   * List all supported providers (derived from the registered adapters,
   * so it cannot drift from initializeAdapters()).
   */
  getSupportedProviders(): LLMProvider[] {
    return Array.from(this.adapters.keys());
  }

  /**
   * Type guard: true when an adapter is registered for the given string.
   */
  isProviderSupported(provider: string): provider is LLMProvider {
    return this.adapters.has(provider as LLMProvider);
  }

  /**
   * Expose the raw adapter for a provider (e.g. for credential validation).
   */
  getAdapter(provider: LLMProvider): LLMAdapter | undefined {
    return this.adapters.get(provider);
  }
}