import { BaseLLMService, LLMConfig, LLMMessage, LLMResponse } from './BaseLLMService';

/**
 * Qwen (Tongyi Qianwen) LLM service.
 *
 * Talks to the DashScope text-generation endpoint, which uses an
 * `input`/`parameters` request envelope and an `output.choices` response
 * envelope (unlike the flat OpenAI-style schema used by Deepseek below).
 */
export class QwenLLMService extends BaseLLMService {
  constructor(config: LLMConfig) {
    super({
      ...config,
      endpoint: config.endpoint || 'https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation',
      model: config.model || 'qwen-plus',
      // `??` (not `||`) so an explicit temperature of 0 — a valid,
      // deterministic sampling setting — is not clobbered by the default.
      temperature: config.temperature ?? 0.7,
      maxTokens: config.maxTokens ?? 2000
    });
  }

  /**
   * Send a conversation to the Qwen API and return the first choice.
   *
   * @param messages - Ordered chat history; roles are passed through as-is.
   * @returns Content, optional token usage, model name and finish reason.
   * @throws Rethrows transport errors from `makeRequest`, or an `Error`
   *         when the response lacks `output.choices`.
   */
  async sendMessage(messages: LLMMessage[]): Promise<LLMResponse> {
    const startTime = Date.now();

    const requestData = {
      model: this.config.model,
      input: {
        messages: messages.map(msg => ({
          role: msg.role,
          content: msg.content
        }))
      },
      parameters: {
        temperature: this.config.temperature,
        max_tokens: this.config.maxTokens,
        result_format: 'message'
      }
    };

    const headers = {
      'Authorization': `Bearer ${this.config.apiKey}`,
      // Explicitly disable server-sent events: we expect a single JSON body.
      'X-DashScope-SSE': 'disable'
    };

    try {
      const response = await this.makeRequest(this.config.endpoint!, requestData, headers);

      this.logger.info('Qwen API request completed', {
        duration: Date.now() - startTime,
        status: response.status
      });

      if (response.data.output && response.data.output.choices && response.data.output.choices.length > 0) {
        const choice = response.data.output.choices[0];
        const content = choice.message?.content || '';

        return {
          content,
          usage: response.data.usage ? {
            // DashScope reports input_/output_tokens rather than the
            // OpenAI-style prompt_/completion_tokens.
            promptTokens: response.data.usage.input_tokens || 0,
            completionTokens: response.data.usage.output_tokens || 0,
            totalTokens: response.data.usage.total_tokens || 0
          } : undefined,
          model: this.config.model,
          finishReason: choice.finish_reason
        };
      } else {
        throw new Error('Invalid response format from Qwen API');
      }
    } catch (error) {
      // `error` is `unknown` under strict mode — narrow before reading .message.
      this.logger.error('Qwen API request failed', {
        error: error instanceof Error ? error.message : String(error),
        duration: Date.now() - startTime
      });
      throw error;
    }
  }

  /**
   * Round-trip a short prompt to verify connectivity and credentials.
   * Any completed request counts as success; only the message text varies
   * depending on whether the model echoed the expected phrase.
   */
  async testConnection(): Promise<{ success: boolean; message: string; responseTime?: number }> {
    const startTime = Date.now();

    try {
      const testMessages: LLMMessage[] = [
        { role: 'user', content: '请简单回复"连接测试成功"' }
      ];

      const response = await this.sendMessage(testMessages);
      const responseTime = Date.now() - startTime;

      if (response.content.includes('连接测试成功') || response.content.includes('成功')) {
        return {
          success: true,
          message: '连接测试成功',
          responseTime
        };
      }
      return {
        success: true,
        message: `连接成功，响应时间: ${responseTime}ms`,
        responseTime
      };
    } catch (error) {
      return {
        success: false,
        message: `连接失败: ${error instanceof Error ? error.message : String(error)}`,
        responseTime: Date.now() - startTime
      };
    }
  }
}

/**
 * Deepseek LLM service.
 *
 * Uses the OpenAI-compatible chat-completions schema: flat
 * `messages`/`temperature`/`max_tokens` request and `choices[].message`
 * response with `prompt_/completion_tokens` usage fields.
 */
export class DeepseekLLMService extends BaseLLMService {
  constructor(config: LLMConfig) {
    super({
      ...config,
      endpoint: config.endpoint || 'https://api.deepseek.com/v1/chat/completions',
      model: config.model || 'deepseek-chat',
      // `??` (not `||`) so an explicit temperature of 0 — a valid,
      // deterministic sampling setting — is not clobbered by the default.
      temperature: config.temperature ?? 0.7,
      maxTokens: config.maxTokens ?? 2000
    });
  }

  /**
   * Send a conversation to the Deepseek API and return the first choice.
   *
   * @param messages - Ordered chat history; roles are passed through as-is.
   * @returns Content, optional token usage, model name and finish reason.
   * @throws Rethrows transport errors from `makeRequest`, or an `Error`
   *         when the response has no `choices`.
   */
  async sendMessage(messages: LLMMessage[]): Promise<LLMResponse> {
    const startTime = Date.now();

    const requestData = {
      model: this.config.model,
      messages: messages.map(msg => ({
        role: msg.role,
        content: msg.content
      })),
      temperature: this.config.temperature,
      max_tokens: this.config.maxTokens,
      stream: false
    };

    const headers = {
      'Authorization': `Bearer ${this.config.apiKey}`
    };

    try {
      const response = await this.makeRequest(this.config.endpoint!, requestData, headers);

      this.logger.info('Deepseek API request completed', {
        duration: Date.now() - startTime,
        status: response.status
      });

      if (response.data.choices && response.data.choices.length > 0) {
        const choice = response.data.choices[0];
        const content = choice.message?.content || '';

        return {
          content,
          usage: response.data.usage ? {
            promptTokens: response.data.usage.prompt_tokens || 0,
            completionTokens: response.data.usage.completion_tokens || 0,
            totalTokens: response.data.usage.total_tokens || 0
          } : undefined,
          model: this.config.model,
          finishReason: choice.finish_reason
        };
      } else {
        throw new Error('Invalid response format from Deepseek API');
      }
    } catch (error) {
      // `error` is `unknown` under strict mode — narrow before reading .message.
      this.logger.error('Deepseek API request failed', {
        error: error instanceof Error ? error.message : String(error),
        duration: Date.now() - startTime
      });
      throw error;
    }
  }

  /**
   * Round-trip a short prompt to verify connectivity and credentials.
   * Any completed request counts as success; only the message text varies
   * depending on whether the model echoed the expected phrase.
   */
  async testConnection(): Promise<{ success: boolean; message: string; responseTime?: number }> {
    const startTime = Date.now();

    try {
      const testMessages: LLMMessage[] = [
        { role: 'user', content: '请简单回复"连接测试成功"' }
      ];

      const response = await this.sendMessage(testMessages);
      const responseTime = Date.now() - startTime;

      if (response.content.includes('连接测试成功') || response.content.includes('成功')) {
        return {
          success: true,
          message: '连接测试成功',
          responseTime
        };
      }
      return {
        success: true,
        message: `连接成功，响应时间: ${responseTime}ms`,
        responseTime
      };
    } catch (error) {
      return {
        success: false,
        message: `连接失败: ${error instanceof Error ? error.message : String(error)}`,
        responseTime: Date.now() - startTime
      };
    }
  }
}

/**
 * Privately-deployed LLM service.
 *
 * Requires an explicit `endpoint`. Sends an OpenAI-compatible request and
 * tolerates several response shapes commonly produced by self-hosted
 * gateways: `choices[]`, `{ response }`, or `{ content }`. The API key is
 * optional (some internal deployments are unauthenticated).
 */
export class PrivateLLMService extends BaseLLMService {
  constructor(config: LLMConfig) {
    super({
      ...config,
      model: config.model || 'private-model',
      // `??` (not `||`) so an explicit temperature of 0 — a valid,
      // deterministic sampling setting — is not clobbered by the default.
      temperature: config.temperature ?? 0.7,
      maxTokens: config.maxTokens ?? 2000
    });

    // No sensible default endpoint exists for a private deployment.
    if (!config.endpoint) {
      throw new Error('Private LLM service requires endpoint configuration');
    }
  }

  /**
   * Send a conversation to the private deployment and normalize the reply.
   *
   * @param messages - Ordered chat history; roles are passed through as-is.
   * @returns Content, optional token usage, model name and finish reason
   *          (`'stop'` when the backend does not report one).
   * @throws Rethrows transport errors from `makeRequest`, or an `Error`
   *         when no known response shape matches.
   */
  async sendMessage(messages: LLMMessage[]): Promise<LLMResponse> {
    const startTime = Date.now();

    // OpenAI-compatible request body; most self-hosted gateways accept it.
    const requestData = {
      model: this.config.model,
      messages: messages.map(msg => ({
        role: msg.role,
        content: msg.content
      })),
      temperature: this.config.temperature,
      max_tokens: this.config.maxTokens,
      stream: false
    };

    const headers: Record<string, string> = {};

    if (this.config.apiKey) {
      // Accept either a pre-formatted "Bearer ..." value or a bare key.
      if (this.config.apiKey.startsWith('Bearer ')) {
        headers['Authorization'] = this.config.apiKey;
      } else {
        headers['Authorization'] = `Bearer ${this.config.apiKey}`;
      }
    }

    try {
      const response = await this.makeRequest(this.config.endpoint!, requestData, headers);

      this.logger.info('Private LLM API request completed', {
        duration: Date.now() - startTime,
        status: response.status,
        endpoint: this.config.endpoint
      });

      // Normalize across the response formats different gateways emit.
      let content = '';
      let usage = undefined;
      let finishReason = 'stop';

      if (response.data.choices && response.data.choices.length > 0) {
        // OpenAI-compatible format (chat `message.content` or legacy `text`).
        const choice = response.data.choices[0];
        content = choice.message?.content || choice.text || '';
        // Prefer the backend's actual finish reason when it reports one.
        finishReason = choice.finish_reason ?? 'stop';
      } else if (response.data.response) {
        // Simple `{ response: "..." }` format.
        content = response.data.response;
      } else if (response.data.content) {
        // Direct `{ content: "..." }` format.
        content = response.data.content;
      } else {
        throw new Error('Unknown response format from private LLM API');
      }

      if (response.data.usage) {
        usage = {
          promptTokens: response.data.usage.prompt_tokens || 0,
          completionTokens: response.data.usage.completion_tokens || 0,
          totalTokens: response.data.usage.total_tokens || 0
        };
      }

      return {
        content,
        usage,
        model: this.config.model,
        finishReason
      };
    } catch (error) {
      // `error` is `unknown` under strict mode — narrow before reading .message.
      this.logger.error('Private LLM API request failed', {
        error: error instanceof Error ? error.message : String(error),
        duration: Date.now() - startTime,
        endpoint: this.config.endpoint
      });
      throw error;
    }
  }

  /**
   * Round-trip a short prompt to verify the deployment is reachable.
   * Unlike the hosted services, no phrase matching is attempted — any
   * completed request is reported as success with its latency.
   */
  async testConnection(): Promise<{ success: boolean; message: string; responseTime?: number }> {
    const startTime = Date.now();

    try {
      const testMessages: LLMMessage[] = [
        { role: 'user', content: '请简单回复"连接测试成功"' }
      ];

      await this.sendMessage(testMessages);
      const responseTime = Date.now() - startTime;

      return {
        success: true,
        message: `连接成功，响应时间: ${responseTime}ms`,
        responseTime
      };
    } catch (error) {
      return {
        success: false,
        message: `连接失败: ${error instanceof Error ? error.message : String(error)}`,
        responseTime: Date.now() - startTime
      };
    }
  }
}

/**
 * Factory for LLM service instances with per-configuration caching.
 */
export class LLMServiceFactory {
  // Cache keyed by provider + model + endpoint + apiKey prefix, so that
  // distinct configurations never share an instance.
  private static instances: Map<string, BaseLLMService> = new Map();

  /**
   * Create (or reuse a cached) LLM service instance for the given config.
   *
   * The cache key includes model and endpoint — previously only provider
   * and an apiKey prefix were used, so two configs targeting different
   * models/endpoints of the same provider wrongly shared one instance.
   * `apiKey` is also guarded: private deployments may legitimately omit it,
   * and `config.apiKey.substring(...)` would have thrown a TypeError.
   *
   * @param config - Provider selection plus connection settings.
   * @returns A service for the requested provider.
   * @throws Error when `config.provider` is not one of the supported values.
   */
  static createService(config: LLMConfig): BaseLLMService {
    const keyPrefix = (config.apiKey ?? '').substring(0, 8);
    const cacheKey = [config.provider, config.model ?? '', config.endpoint ?? '', keyPrefix].join('|');

    const cached = this.instances.get(cacheKey);
    if (cached) {
      return cached;
    }

    let service: BaseLLMService;

    switch (config.provider) {
      case 'qwen':
        service = new QwenLLMService(config);
        break;
      case 'deepseek':
        service = new DeepseekLLMService(config);
        break;
      case 'private':
        service = new PrivateLLMService(config);
        break;
      default:
        throw new Error(`Unsupported LLM provider: ${config.provider}`);
    }

    this.instances.set(cacheKey, service);
    return service;
  }

  /**
   * Drop all cached service instances (e.g. after credentials change).
   */
  static clearCache(): void {
    this.instances.clear();
  }

  /**
   * List the provider identifiers accepted by {@link createService}.
   */
  static getSupportedProviders(): string[] {
    return ['qwen', 'deepseek', 'private'];
  }
}