/**
 * AI provider adapter — IntelliMark enhanced edition.
 * Integrates the existing AIProviderAdapter with the new architecture.
 */

import { BaseLLM } from "@langchain/core/language_models/base";
import { BaseMessage, HumanMessage, AIMessage, SystemMessage } from "@langchain/core/messages";
import { CallbackManager } from "@langchain/core/callbacks/manager";
import { LangChainGlobalConfig } from "./langChainConfig";

// Re-export the existing adapter API so current consumers keep working.
export {
  AIProvider,
  AIRequestOptions,
  AIStreamOptions,
  AIResponse,
  TokenUsage,
  ProviderConfig,
  BaseAIProviderAdapter,
  OpenAIProviderAdapter,
  AnthropicProviderAdapter,
  DeepSeekProviderAdapter,
  LocalProviderAdapter,
  AIProviderManager
} from '../../services/AIProviderAdapter';

// Bug fix: a re-export (`export { X } from '…'`) does NOT create local
// bindings, yet this module uses AIRequestOptions / AIResponse / TokenUsage
// in type positions and extends AIProviderManager below. Import them
// explicitly so the file actually compiles.
import {
  AIProviderManager,
  type AIRequestOptions,
  type AIResponse,
  type TokenUsage,
} from '../../services/AIProviderAdapter';

/**
 * Request options extended with LangChain-specific hooks.
 * Extends the base `AIRequestOptions` from AIProviderAdapter.
 */
export interface EnhancedAIRequestOptions extends AIRequestOptions {
  /** Optional LangChain callback manager forwarded with the request. */
  callbacks?: CallbackManager;
  /** Free-form metadata; `generateEnhancedResponse` passes it through as the response `context`. */
  metadata?: Record<string, any>;
  /** Invoked with each streamed chunk (streaming calls only). */
  onProgress?: (chunk: string) => void;
  /** Invoked once with the full concatenated text when the stream finishes. */
  onComplete?: (result: string) => void;
  /** Invoked when generation fails; the error is still rethrown afterwards. */
  onError?: (error: Error) => void;
}

/**
 * AI response enriched with LangChain run metadata and optional
 * originating-document context. Extends the base `AIResponse`.
 */
export interface EnhancedAIResponse extends AIResponse {
  /** Metadata describing the LangChain run that produced this response. */
  langchainMetadata?: {
    /** Locally generated id of the form `run_<timestamp>_<random>`. */
    runId?: string;
    /** Wall-clock duration of the request, in milliseconds. */
    executionTime?: number;
    toolCalls?: any[];
    intermediateSteps?: any[];
  };
  /** Where the request originated; populated from the request's `metadata`. */
  context?: {
    projectId?: string;
    documentId?: string;
    selectionId?: string;
  };
}

/** Input for `generateBatchResponse`: the requests plus concurrency limits. */
export interface BatchRequestOptions {
  /** The individual requests to execute. */
  requests: Array<{
    messages: BaseMessage[];
    options?: EnhancedAIRequestOptions;
  }>;
  /** Maximum number of requests in flight at once (default 3). */
  maxConcurrency?: number;
  // NOTE(review): `timeout` is currently not enforced by generateBatchResponse —
  // confirm whether enforcement is planned or the field can be dropped.
  timeout?: number;
}

/** Aggregate result of a batch run. */
export interface BatchResponse {
  /** Successful responses only; failed slots are filtered out. */
  results: EnhancedAIResponse[];
  /** Failures, each tagged with the index of the originating request. */
  errors: Array<{
    index: number;
    error: Error;
  }>;
  /** Sum of token usage across successful responses. */
  totalTokens: number;
  /** Sum of reported costs across successful responses. */
  totalCost: number;
  /** Total wall-clock time for the whole batch, in milliseconds. */
  executionTime: number;
}

/**
 * Enhanced AI provider manager.
 *
 * Layers LangChain run metadata, streaming callbacks, batch execution,
 * per-provider usage statistics, health checks and cost estimation on top
 * of the base AIProviderManager. Accessed via the singleton
 * {@link EnhancedAIProviderManager.getEnhancedInstance}.
 */
export class EnhancedAIProviderManager extends AIProviderManager {
  private static enhancedInstance: EnhancedAIProviderManager;
  // NOTE(review): stored but never read inside this class — confirm whether
  // downstream code consumes it or it can be removed.
  private langchainConfig: LangChainGlobalConfig | null = null;
  // Per-provider aggregates: { totalRequests, totalTokens, totalCost, averageLatency, errors }.
  private requestStats: Map<string, any> = new Map();

  private constructor() {
    super();
  }

  /** Returns the process-wide singleton, creating it on first use. */
  public static getEnhancedInstance(): EnhancedAIProviderManager {
    if (!EnhancedAIProviderManager.enhancedInstance) {
      EnhancedAIProviderManager.enhancedInstance = new EnhancedAIProviderManager();
    }
    return EnhancedAIProviderManager.enhancedInstance;
  }

  /** Stores the global LangChain configuration. */
  public setLangChainConfig(config: LangChainGlobalConfig): void {
    this.langchainConfig = config;
  }

  /**
   * Generates a single response and wraps it with LangChain metadata.
   *
   * @param messages Conversation to send to the provider.
   * @param providerName Optional provider key; the base manager's default is used when omitted.
   * @param options Enhanced request options; `options.metadata` is passed through as `context`.
   * @returns The provider response plus runId / executionTime metadata.
   * @throws Rethrows any provider error after recording it in the stats.
   */
  public async generateEnhancedResponse(
    messages: BaseMessage[],
    providerName?: string,
    options?: EnhancedAIRequestOptions
  ): Promise<EnhancedAIResponse> {
    const startTime = Date.now();
    const provider = this.getProvider(providerName);
    // Fix: stats were previously keyed by a possibly-undefined name; fall back
    // to the provider's own identifier so entries are never keyed by `undefined`.
    const statsKey = providerName ?? String(provider.getProvider());

    try {
      const baseResponse = await provider.generateResponse(messages, options);

      const enhancedResponse: EnhancedAIResponse = {
        ...baseResponse,
        langchainMetadata: {
          runId: this.generateRunId(),
          executionTime: Date.now() - startTime,
          toolCalls: [],
          intermediateSteps: [],
        },
        // NOTE(review): assumes callers place projectId/documentId/selectionId
        // inside options.metadata — confirm against call sites.
        context: options?.metadata,
      };

      this.recordRequestStats(statsKey, enhancedResponse);

      return enhancedResponse;
    } catch (error) {
      // Fix: the `errors` stat counter was declared but never incremented.
      this.recordErrorStat(statsKey);
      console.error(`Enhanced AI response generation failed:`, error);
      throw error;
    }
  }

  /**
   * Streams a response chunk by chunk, invoking the optional onProgress /
   * onComplete / onError callbacks and recording stats once the stream ends.
   */
  public async* generateEnhancedStream(
    messages: BaseMessage[],
    providerName?: string,
    options?: EnhancedAIRequestOptions
  ): AsyncGenerator<string, void, unknown> {
    const startTime = Date.now();
    const provider = this.getProvider(providerName);
    const statsKey = providerName ?? String(provider.getProvider());
    let fullResponse = '';

    try {
      const stream = provider.generateStream(messages, options);

      for await (const chunk of stream) {
        fullResponse += chunk;
        yield chunk;
        options?.onProgress?.(chunk);
      }

      options?.onComplete?.(fullResponse);

      // Record a synthetic response so streaming calls show up in the stats.
      const enhancedResponse: EnhancedAIResponse = {
        content: fullResponse,
        model: provider.getConfig().model,
        provider: provider.getProvider(),
        langchainMetadata: {
          runId: this.generateRunId(),
          executionTime: Date.now() - startTime,
        },
      };

      this.recordRequestStats(statsKey, enhancedResponse);

    } catch (error) {
      const err = error instanceof Error ? error : new Error(String(error));
      this.recordErrorStat(statsKey);
      options?.onError?.(err);
      throw err;
    }
  }

  /**
   * Executes many requests with bounded concurrency.
   *
   * Failed requests are reported in `errors` (tagged with their input index)
   * and excluded from `results`.
   * NOTE(review): `options.timeout` is currently not enforced — confirm intent.
   */
  public async generateBatchResponse(
    options: BatchRequestOptions
  ): Promise<BatchResponse> {
    const startTime = Date.now();
    const maxConcurrency = options.maxConcurrency || 3;
    const results: EnhancedAIResponse[] = [];
    const errors: Array<{ index: number; error: Error }> = [];
    let totalTokens = 0;
    let totalCost = 0;

    // Process the requests in chunks of `maxConcurrency`.
    for (let i = 0; i < options.requests.length; i += maxConcurrency) {
      const batch = options.requests.slice(i, i + maxConcurrency);

      const batchPromises = batch.map(async (request, batchIndex) => {
        const globalIndex = i + batchIndex;
        try {
          const response = await this.generateEnhancedResponse(
            request.messages,
            undefined,
            request.options
          );

          // Write by global index so slots line up with the input order.
          results[globalIndex] = response;

          if (response.usage) {
            totalTokens += response.usage.totalTokens;
            totalCost += response.usage.cost || 0;
          }

          return response;
        } catch (error) {
          const err = error instanceof Error ? error : new Error(String(error));
          errors.push({ index: globalIndex, error: err });
          throw err;
        }
      });

      // allSettled: one failure must not abort the rest of the chunk.
      await Promise.allSettled(batchPromises);
    }

    return {
      // filter(Boolean) drops the holes left by failed requests.
      results: results.filter(Boolean),
      errors,
      totalTokens,
      totalCost,
      executionTime: Date.now() - startTime,
    };
  }

  /** Returns accumulated stats for one provider, or a snapshot of all of them. */
  public getProviderStats(providerName?: string): any {
    if (providerName) {
      return this.requestStats.get(providerName) || {};
    }
    return Object.fromEntries(this.requestStats);
  }

  /** Clears stats for one provider, or for every provider when no name is given. */
  public resetStats(providerName?: string): void {
    if (providerName) {
      this.requestStats.delete(providerName);
    } else {
      this.requestStats.clear();
    }
  }

  /** Generates a unique-enough run id: `run_<timestamp>_<9 random base36 chars>`. */
  private generateRunId(): string {
    // slice() replaces the deprecated substr(); same 9-character window.
    return `run_${Date.now()}_${Math.random().toString(36).slice(2, 11)}`;
  }

  // Folds one successful response into the per-provider aggregates.
  private recordRequestStats(providerName: string, response: EnhancedAIResponse): void {
    const stats = this.requestStats.get(providerName) || this.emptyStats();

    stats.totalRequests++;

    if (response.usage) {
      stats.totalTokens += response.usage.totalTokens;
      stats.totalCost += response.usage.cost || 0;
    }

    if (response.langchainMetadata?.executionTime) {
      // Incremental running mean over totalRequests samples.
      stats.averageLatency =
        (stats.averageLatency * (stats.totalRequests - 1) +
         response.langchainMetadata.executionTime) / stats.totalRequests;
    }

    this.requestStats.set(providerName, stats);
  }

  // Counts a failed request against the provider's `errors` aggregate.
  private recordErrorStat(providerName: string): void {
    const stats = this.requestStats.get(providerName) || this.emptyStats();
    stats.errors++;
    this.requestStats.set(providerName, stats);
  }

  // Fresh, zeroed stats record.
  private emptyStats(): any {
    return {
      totalRequests: 0,
      totalTokens: 0,
      totalCost: 0,
      averageLatency: 0,
      errors: 0,
    };
  }

  /**
   * Sends a tiny prompt through each provider (or just `providerName`) and
   * reports per-provider status, latency and availability.
   */
  public async healthCheck(providerName?: string): Promise<Record<string, any>> {
    const results: Record<string, any> = {};
    const providers = providerName ? [providerName] : this.listProviders();

    for (const provider of providers) {
      try {
        const startTime = Date.now();
        const testMessages = [
          new SystemMessage('You are a helpful assistant.'),
          new HumanMessage('Say "Hello"'),
        ];

        const response = await this.generateEnhancedResponse(
          testMessages,
          provider,
          { maxTokens: 10 }
        );

        results[provider] = {
          status: 'healthy',
          latency: Date.now() - startTime,
          model: response.model,
          available: true,
        };
      } catch (error) {
        results[provider] = {
          status: 'unhealthy',
          error: error instanceof Error ? error.message : 'Unknown error',
          available: false,
        };
      }
    }

    return results;
  }

  /**
   * Rough cost estimate: ~4 characters per prompt token plus the requested
   * completion budget (default 100 tokens). Returns 0 when estimation fails.
   */
  public estimateCost(
    messages: BaseMessage[],
    providerName?: string,
    maxTokens?: number
  ): number {
    try {
      const provider = this.getProvider(providerName);
      // Fix: BaseMessage.content may be a string or an array of content parts;
      // `.length` on the array would count parts, not characters, so stringify
      // non-string content before measuring.
      const textLength = messages.reduce(
        (sum, msg) =>
          sum +
          (typeof msg.content === 'string'
            ? msg.content.length
            : JSON.stringify(msg.content).length),
        0
      );
      const estimatedPromptTokens = Math.ceil(textLength / 4);
      const estimatedCompletionTokens = maxTokens || 100;

      const usage: TokenUsage = {
        promptTokens: estimatedPromptTokens,
        completionTokens: estimatedCompletionTokens,
        totalTokens: estimatedPromptTokens + estimatedCompletionTokens,
      };

      return provider.calculateCost(usage);
    } catch (error) {
      console.warn('Cost estimation failed:', error);
      return 0;
    }
  }
}

/** Convenience accessor for the singleton EnhancedAIProviderManager. */
export const getEnhancedAIProviderManager = (): EnhancedAIProviderManager =>
  EnhancedAIProviderManager.getEnhancedInstance();

/** One-shot helper: generates an enhanced response through the singleton manager. */
export const generateEnhancedAIResponse = async (
  messages: BaseMessage[],
  provider?: string,
  options?: EnhancedAIRequestOptions
): Promise<EnhancedAIResponse> => {
  return getEnhancedAIProviderManager().generateEnhancedResponse(messages, provider, options);
};

/** Streaming helper: yields chunks from the singleton manager's enhanced stream. */
export const generateEnhancedAIStream = async function* (
  messages: BaseMessage[],
  provider?: string,
  options?: EnhancedAIRequestOptions
): AsyncGenerator<string, void, unknown> {
  yield* getEnhancedAIProviderManager().generateEnhancedStream(messages, provider, options);
};

/** Batch helper: forwards to the singleton manager's generateBatchResponse. */
export const generateBatchAIResponse = async (
  options: BatchRequestOptions
): Promise<BatchResponse> => {
  return getEnhancedAIProviderManager().generateBatchResponse(options);
};