/**
 * LangChain configuration and initialization — IntelliMark adaptation.
 * Migrated from the ai-writer-lite project and optimized.
 */

import { ChatOpenAI } from "@langchain/openai";
import { ChatAnthropic } from "@langchain/anthropic";
import { HumanMessage, AIMessage, SystemMessage } from "@langchain/core/messages";
import { MemoryConfig } from "../types/memory";
import { AgentConfig } from "../types/agent";

/**
 * Top-level LangChain configuration for IntelliMark, grouping provider,
 * memory, tool, agent, logging and performance settings.
 */
export interface LangChainGlobalConfig {
  providers: AIProviderConfig;
  memory: MemoryConfig;
  tools: ToolRegistryConfig;
  agents: AgentRegistryConfig;
  logging: LoggingConfig;
  performance: PerformanceConfig;
}

/** AI provider settings: the model catalogue plus selection and throttling. */
export interface AIProviderConfig {
  /** Model id (a key of `models`) used when no explicit model is requested. */
  default: string;
  /** All configured models, keyed by model id (e.g. 'openai-gpt4'). */
  models: Record<string, ModelConfig>;
  /** Ordered list of fallback model ids. */
  fallback: string[];
  /** Rate limits keyed by provider name (e.g. 'openai', 'anthropic'). */
  rateLimits: Record<string, RateLimitConfig>;
}

/** Connection and sampling settings for a single model. */
export interface ModelConfig {
  /** Which backing chat client to construct (deepseek/local use the OpenAI-compatible client). */
  provider: 'openai' | 'anthropic' | 'deepseek' | 'local';
  /** Provider-side model identifier, e.g. 'gpt-4'. */
  model: string;
  apiKey: string;
  /** Optional custom endpoint for OpenAI-compatible providers (deepseek, local Ollama). */
  baseURL?: string;
  temperature?: number;
  maxTokens?: number;
  /** Request timeout in milliseconds. */
  timeout?: number;
  /** Maximum retry attempts on failure. */
  retries?: number;
}

/** Throttling limits for one provider. */
export interface RateLimitConfig {
  requestsPerMinute: number;
  requestsPerHour: number;
  tokensPerMinute: number;
  /** Maximum concurrent in-flight requests. */
  concurrent: number;
}

/** Tool registry settings. */
export interface ToolRegistryConfig {
  /** Whether tools found under `toolPaths` are registered automatically. */
  autoRegister: boolean;
  /** Paths (globs) searched for tool modules, e.g. './src/tools/**'. */
  toolPaths: string[];
  /** Tool names excluded from registration. */
  disabledTools: string[];
  // NOTE(review): consider Record<string, unknown> to avoid exporting `any`.
  customTools: Record<string, any>;
}

/** Agent registry settings. */
export interface AgentRegistryConfig {
  /** Id of the agent used by default (e.g. 'intellimark-assistant'). */
  defaultAgent: string;
  /** Per-agent configuration, keyed by agent id. */
  agentConfigs: Record<string, AgentConfig>;
  /** Capability tags advertised by the registry. */
  capabilities: string[];
}

/** Logging settings. */
export interface LoggingConfig {
  level: 'debug' | 'info' | 'warn' | 'error';
  includeToolCalls: boolean;
  includeThoughts: boolean;
  includePerformance: boolean;
  /** Maximum log size in bytes (default config uses 50 MB). */
  maxLogSize: number;
  /** Number of days logs are retained. */
  retentionDays: number;
}

/** Performance and caching settings. */
export interface PerformanceConfig {
  enableCaching: boolean;
  /** Maximum number of cache entries. */
  cacheSize: number;
  /** Cache time-to-live in seconds (default config uses 3600 = 1 hour). */
  cacheTTL: number;
  enableProfiling: boolean;
  maxConcurrentExecutions: number;
  /** Default execution timeout in milliseconds (default config: 30000). */
  defaultTimeout: number;
}

/**
 * Baseline configuration tuned for IntelliMark.
 * API keys are read from the environment when this module is loaded;
 * LangChainManager layers file/env overrides on top of this object.
 */
export const DEFAULT_LANGCHAIN_CONFIG: LangChainGlobalConfig = {
  providers: {
    default: 'openai-gpt4',
    models: {
      'openai-gpt4': {
        provider: 'openai',
        model: 'gpt-4',
        apiKey: process.env.OPENAI_API_KEY || '',
        temperature: 0.7,
        maxTokens: 4000,
        timeout: 60000,
        retries: 3,
      },
      'openai-gpt35': {
        provider: 'openai',
        model: 'gpt-3.5-turbo',
        apiKey: process.env.OPENAI_API_KEY || '',
        temperature: 0.7,
        maxTokens: 4000,
        timeout: 60000,
        retries: 3,
      },
      'anthropic-claude': {
        provider: 'anthropic',
        model: 'claude-3-sonnet-20240229',
        apiKey: process.env.ANTHROPIC_API_KEY || '',
        temperature: 0.7,
        maxTokens: 4000,
        timeout: 60000,
        retries: 3,
      },
      'deepseek-chat': {
        provider: 'deepseek',
        model: 'deepseek-chat',
        apiKey: process.env.DEEPSEEK_API_KEY || '',
        baseURL: 'https://api.deepseek.com',
        temperature: 0.7,
        maxTokens: 4000,
        timeout: 60000,
        retries: 3,
      },
      'local-ollama': {
        provider: 'local',
        model: 'llama2',
        apiKey: 'not-required', // placeholder — local Ollama takes no API key
        baseURL: 'http://localhost:11434',
        temperature: 0.7,
        maxTokens: 4000,
        timeout: 120000, // local inference is slower; doubled timeout
        retries: 1,
      },
    },
    fallback: ['openai-gpt35', 'anthropic-claude'],
    // Per-provider throttling (keys match ModelConfig.provider values).
    rateLimits: {
      'openai': {
        requestsPerMinute: 60,
        requestsPerHour: 3600,
        tokensPerMinute: 90000,
        concurrent: 5,
      },
      'anthropic': {
        requestsPerMinute: 50,
        requestsPerHour: 3000,
        tokensPerMinute: 80000,
        concurrent: 3,
      },
      'deepseek': {
        requestsPerMinute: 100,
        requestsPerHour: 6000,
        tokensPerMinute: 120000,
        concurrent: 5,
      },
      'local': {
        requestsPerMinute: 30,
        requestsPerHour: 1000,
        tokensPerMinute: 60000,
        concurrent: 2,
      },
    },
  },
  memory: {
    conversational: {
      maxMessages: 20,
      returnMessages: true,
      autoSave: true,
    },
    vector: {
      enabled: true,
      vectorStore: 'chroma',
      embeddingModel: 'text-embedding-ada-002',
      dimension: 1536,
      similarityThreshold: 0.7,
      maxDocuments: 10000,
    },
    episodic: {
      enabled: true,
      maxEpisodes: 1000,
      importanceThreshold: 0.5,
      autoConsolidation: true,
    },
    knowledge: {
      enabled: false,
      graphDatabase: 'neo4j',
      nodeTypes: ['character', 'location', 'concept', 'event'],
      edgeTypes: ['related_to', 'part_of', 'located_at', 'caused_by'],
      confidenceThreshold: 0.7,
    },
    working: {
      capacity: 10,
      autoCleanup: true,
      maxTaskAge: 30, // NOTE(review): units unclear here — confirm against MemoryConfig
    },
    longTerm: {
      enabled: false,
      consolidationInterval: 24,
      forgettingEnabled: true,
      reinforcementEnabled: true,
    },
  },
  tools: {
    autoRegister: true,
    toolPaths: ['./src/tools/**'],
    disabledTools: [],
    customTools: {},
  },
  agents: {
    defaultAgent: 'intellimark-assistant',
    agentConfigs: {},
    capabilities: [
      'text-generation',
      'text-analysis',
      'memory-management',
      'markdown-assistance',
      'document-management',
    ],
  },
  logging: {
    level: 'info',
    includeToolCalls: true,
    includeThoughts: true,
    includePerformance: true,
    maxLogSize: 50 * 1024 * 1024, // 50MB
    retentionDays: 30,
  },
  performance: {
    enableCaching: true,
    cacheSize: 1000,
    cacheTTL: 3600, // 1 hour
    enableProfiling: true,
    maxConcurrentExecutions: 3,
    defaultTimeout: 30000, // 30 seconds
  },
};

// LangChain管理器类
export class LangChainManager {
  private static instance: LangChainManager;
  private config: LangChainGlobalConfig;
  private models: Map<string, any> = new Map();
  private isInitialized = false;

  private constructor() {
    this.config = this.loadConfig();
  }

  public static getInstance(): LangChainManager {
    if (!LangChainManager.instance) {
      LangChainManager.instance = new LangChainManager();
    }
    return LangChainManager.instance;
  }

  private loadConfig(): LangChainGlobalConfig {
    try {
      // 尝试从环境变量和配置文件加载
      const envConfig = this.loadFromEnvironment();
      const fileConfig = this.loadFromConfigFile();

      return {
        ...DEFAULT_LANGCHAIN_CONFIG,
        ...fileConfig,
        ...envConfig,
      };
    } catch (error) {
      console.warn('Failed to load custom config, using defaults:', error);
      return DEFAULT_LANGCHAIN_CONFIG;
    }
  }

  private loadFromEnvironment(): Partial<LangChainGlobalConfig> {
    const config: any = {};

    // 从环境变量加载API密钥
    if (process.env.OPENAI_API_KEY) {
      config.providers = {
        ...config.providers,
        models: {
          ...config.providers?.models,
          'openai-gpt4': {
            ...DEFAULT_LANGCHAIN_CONFIG.providers.models['openai-gpt4'],
            apiKey: process.env.OPENAI_API_KEY,
          },
          'openai-gpt35': {
            ...DEFAULT_LANGCHAIN_CONFIG.providers.models['openai-gpt35'],
            apiKey: process.env.OPENAI_API_KEY,
          },
        },
      };
    }

    if (process.env.ANTHROPIC_API_KEY) {
      config.providers = {
        ...config.providers,
        models: {
          ...config.providers?.models,
          'anthropic-claude': {
            ...DEFAULT_LANGCHAIN_CONFIG.providers.models['anthropic-claude'],
            apiKey: process.env.ANTHROPIC_API_KEY,
          },
        },
      };
    }

    if (process.env.DEEPSEEK_API_KEY) {
      config.providers = {
        ...config.providers,
        models: {
          ...config.providers?.models,
          'deepseek-chat': {
            ...DEFAULT_LANGCHAIN_CONFIG.providers.models['deepseek-chat'],
            apiKey: process.env.DEEPSEEK_API_KEY,
          },
        },
      };
    }

    // 日志级别
    if (process.env.LANGCHAIN_LOG_LEVEL) {
      config.logging = {
        level: process.env.LANGCHAIN_LOG_LEVEL as any,
      };
    }

    return config;
  }

  private loadFromConfigFile(): Partial<LangChainGlobalConfig> {
    // 尝试从IntelliMark配置文件加载
    try {
      // 这里可以添加从IntelliMark配置文件加载的逻辑
      return {};
    } catch (error) {
      return {};
    }
  }

  public async initialize(): Promise<void> {
    if (this.isInitialized) {
      return;
    }

    console.log('Initializing LangChain for IntelliMark...');

    // 初始化模型
    await this.initializeModels();

    // 初始化内存系统
    await this.initializeMemory();

    // 初始化工具注册表
    await this.initializeTools();

    this.isInitialized = true;
    console.log('LangChain for IntelliMark initialized successfully');
  }

  private async initializeModels(): Promise<void> {
    for (const [modelId, modelConfig] of Object.entries(this.config.providers.models)) {
      try {
        let model;

        switch (modelConfig.provider) {
          case 'openai':
            model = new ChatOpenAI({
              modelName: modelConfig.model,
              openAIApiKey: modelConfig.apiKey,
              temperature: modelConfig.temperature,
              maxTokens: modelConfig.maxTokens,
              timeout: modelConfig.timeout,
              maxRetries: modelConfig.retries,
            });
            break;

          case 'anthropic':
            model = new ChatAnthropic({
              model: modelConfig.model,
              anthropicApiKey: modelConfig.apiKey,
              temperature: modelConfig.temperature,
              maxTokens: modelConfig.maxTokens,
              timeout: modelConfig.timeout,
              maxRetries: modelConfig.retries,
            });
            break;

          case 'deepseek':
          case 'local':
            model = new ChatOpenAI({
              modelName: modelConfig.model,
              openAIApiKey: modelConfig.apiKey,
              temperature: modelConfig.temperature,
              maxTokens: modelConfig.maxTokens,
              timeout: modelConfig.timeout,
              maxRetries: modelConfig.retries,
              baseURL: modelConfig.baseURL,
            });
            break;

          default:
            console.warn(`Unsupported provider: ${modelConfig.provider}`);
            continue;
        }

        this.models.set(modelId, model);
        console.log(`Initialized model: ${modelId}`);
      } catch (error) {
        console.error(`Failed to initialize model ${modelId}:`, error);
      }
    }
  }

  private async initializeMemory(): Promise<void> {
    // 初始化内存系统 - 适配IntelliMark需求
    console.log('Memory system initialized for IntelliMark');
  }

  private async initializeTools(): Promise<void> {
    // 初始化工具注册表 - 适配Markdown编辑需求
    console.log('Tool registry initialized for IntelliMark');
  }

  public getModel(modelId?: string): any {
    const targetModelId = modelId || this.config.providers.default;
    const model = this.models.get(targetModelId);

    if (!model) {
      throw new Error(`Model not found: ${targetModelId}`);
    }

    return model;
  }

  public getConfig(): LangChainGlobalConfig {
    return { ...this.config };
  }

  public updateConfig(updates: Partial<LangChainGlobalConfig>): void {
    this.config = {
      ...this.config,
      ...updates,
      providers: {
        ...this.config.providers,
        ...updates.providers,
      },
      memory: {
        ...this.config.memory,
        ...updates.memory,
      },
    };
  }

  public getAvailableModels(): string[] {
    return Array.from(this.models.keys());
  }

  public isReady(): boolean {
    return this.isInitialized;
  }

  public async reset(): Promise<void> {
    this.models.clear();
    this.isInitialized = false;
    await this.initialize();
  }
}

// Convenience accessor for the shared manager instance.
export const getLangChainManager = (): LangChainManager =>
  LangChainManager.getInstance();

// One-shot initialization of the shared manager (models, memory, tools).
export const initializeLangChain = async (): Promise<void> => {
  await getLangChainManager().initialize();
};

export const getModel = (modelId?: string): any => {
  const manager = getLangChainManager();
  return manager.getModel(modelId);
};

// Convenience accessor for the current effective configuration.
export const getConfig = (): LangChainGlobalConfig =>
  getLangChainManager().getConfig();

// NOTE: DEFAULT_LANGCHAIN_CONFIG is already exported at its declaration above.
// The previous `export { DEFAULT_LANGCHAIN_CONFIG };` duplicated that export,
// which is a TypeScript compile error (cannot redeclare exported variable).