import OpenAI from 'openai';
import config from '@config/index';
import logger from '@utils/logger';

/**
 * Input payload describing a single Prometheus alert to be analyzed.
 */
interface AlertAnalysisRequest {
  alertName: string; // Alert rule name; also drives the keyword-based fallback heuristics (cpu/memory/disk)
  severity: string; // Severity string; upper-cased into the fallback summary and used as a tag
  description: string; // Human-readable description; reused as fallback root cause when present
  labels: Record<string, string>; // Prometheus labels (the `job` label feeds the fallback tags)
  annotations: Record<string, string>; // Prometheus annotations, rendered into the LLM prompt
}

/**
 * Structured result of an alert analysis, produced either by the LLM
 * or by the heuristic fallback path.
 */
interface AlertAnalysisResponse {
  summary: string; // Brief overview of the issue
  rootCause: string; // Root-cause analysis (or "Unknown"/description in degraded paths)
  suggestions: string[]; // Actionable remediation steps
  relatedAlerts: string[]; // NOTE(review): always [] today — neither path populates it yet
  confidence: number; // 0-1; defaults to 0.5 on malformed LLM output, fixed 0.3 for fallback
  tags: string[]; // Categorization tags (severity, job, etc.)
}

class LLMService {
  /** OpenAI-compatible client; null until successfully configured. */
  private client: OpenAI | null = null;
  /** Whether LLM-backed analysis is available; when false, heuristics are used. */
  private enabled: boolean = false;

  constructor() {
    this.initializeClient();
  }

  /**
   * Resolve the model name once for all call sites.
   * `||` (not `??`) is intentional: an empty-string model should also fall back.
   */
  private get model(): string {
    return config.env.LLM_MODEL || 'claude-sonnet-4-20250514';
  }

  /**
   * Initialize the OpenAI-compatible client from environment configuration.
   * Missing configuration or construction failure degrades the service to
   * fallback analysis instead of throwing.
   */
  private initializeClient(): void {
    try {
      if (config.env.LLM_API_KEY && config.env.LLM_BASE_URL) {
        this.client = new OpenAI({
          baseURL: config.env.LLM_BASE_URL,
          apiKey: config.env.LLM_API_KEY,
        });
        this.enabled = true;
        logger.info('LLM service initialized successfully');
      } else {
        logger.warn('LLM service not configured, using fallback analysis');
        this.enabled = false;
      }
    } catch (error) {
      logger.error('Failed to initialize LLM client:', error as Error);
      this.enabled = false;
    }
  }

  /**
   * Analyze an alert with the LLM and return a structured analysis.
   *
   * Falls back to {@link generateFallbackAnalysis} when the service is
   * disabled or when any step (request, JSON extraction, parsing) fails,
   * so callers always receive a usable response.
   */
  async analyzeAlert(alert: AlertAnalysisRequest): Promise<AlertAnalysisResponse> {
    if (!this.enabled || !this.client) {
      return this.generateFallbackAnalysis(alert);
    }

    try {
      const prompt = this.buildAlertAnalysisPrompt(alert);

      const response = await this.client.chat.completions.create({
        model: this.model,
        messages: [
          {
            role: 'system',
            content: `You are an experienced DevOps engineer specializing in system monitoring and incident response.
Analyze the alert and provide actionable insights.

Important: You must strictly follow the JSON format below, with field names in English and all content values also in English:

{
  "summary": "Brief overview of the issue (English, within 50 words)",
  "rootCause": "Root cause analysis (English, detailed explanation of possible causes, within 100 words)",
  "suggestions": ["Specific actionable step 1 (English)", "Specific actionable step 2 (English)", "Specific actionable step 3 (English)", "Specific actionable step 4 (English)"],
  "confidence": 0.85,
  "tags": ["tag1", "tag2", "tag3"]
}

Requirements:
1. JSON field names must be in English: summary, rootCause, suggestions, confidence, tags
2. All field content values must be in English
3. Suggestions must provide at least 3-5 specific actionable steps
4. Root cause analysis should be detailed and professional
5. Confidence should be based on the completeness and clarity of alert information, value between 0-1
6. Tags should accurately reflect alert type and impact scope
7. Do not add any markdown code block markers, return pure JSON only`,
          },
          {
            role: 'user',
            content: prompt,
          },
        ],
        // '??' (not '||') so an explicitly configured temperature of 0 is respected.
        temperature: config.env.LLM_TEMPERATURE ?? 0.7,
        max_tokens: config.env.LLM_MAX_TOKENS ?? 3000,
      });

      const content = response.choices[0]?.message?.content;
      if (!content) {
        throw new Error('Empty response from LLM');
      }

      // Record the raw AI response for debugging (via the project logger, not console).
      logger.debug(`LLM raw response: ${content}`);

      // Extract JSON from response (the model may wrap it in markdown code blocks).
      const jsonMatch = content.match(/\{[\s\S]*\}/);
      if (!jsonMatch) {
        logger.error(`No JSON found in LLM response: ${content}`);
        throw new Error('No JSON found in LLM response');
      }

      // May still throw on malformed JSON; caught below and handled via fallback.
      const analysis = JSON.parse(jsonMatch[0]) as Partial<AlertAnalysisResponse>;

      // Validate each field individually so one malformed field does not
      // discard the rest of an otherwise-usable analysis.
      return {
        summary: analysis.summary || 'Unable to generate summary',
        rootCause: analysis.rootCause || 'Unknown',
        suggestions: Array.isArray(analysis.suggestions) ? analysis.suggestions : [],
        relatedAlerts: [],
        confidence: typeof analysis.confidence === 'number' ? analysis.confidence : 0.5,
        tags: Array.isArray(analysis.tags) ? analysis.tags : [],
      };
    } catch (error) {
      logger.error('Failed to analyze alert with LLM:', error as Error);
      return this.generateFallbackAnalysis(alert);
    }
  }

  /**
   * Build the user prompt for alert analysis from the alert's fields,
   * labels, and annotations.
   */
  private buildAlertAnalysisPrompt(alert: AlertAnalysisRequest): string {
    return `
Please analyze the following Prometheus alert and provide professional diagnostic insights:

Alert Name: ${alert.alertName}
Severity: ${alert.severity}
Description: ${alert.description}

Labels:
${Object.entries(alert.labels)
  .map(([key, value]) => `  ${key}: ${value}`)
  .join('\n')}

Annotations:
${Object.entries(alert.annotations)
  .map(([key, value]) => `  ${key}: ${value}`)
  .join('\n')}

Based on the above information, please analyze and provide:
1. Issue summary (2-3 sentences, briefly explain what happened)
2. Root cause analysis (detailed analysis of possible causes, considering services, network, configuration, etc.)
3. Specific resolution suggestions (at least 3-5 actionable steps, be specific and clear)
4. Confidence score (between 0-1, based on the completeness of alert information)
5. Relevant tags (2-4 tags for categorization)

Important: Must strictly follow the specified JSON format, do not include markdown code block markers, and do not add any additional explanatory text.
`.trim();
  }

  /**
   * Generate a basic heuristic analysis when the LLM is unavailable.
   * Confidence is fixed low (0.3) to signal the degraded path to consumers.
   */
  private generateFallbackAnalysis(alert: AlertAnalysisRequest): AlertAnalysisResponse {
    const suggestions: string[] = [];
    const name = alert.alertName.toLowerCase();

    // Generate context-aware suggestions based on alert type
    if (name.includes('cpu')) {
      suggestions.push('Check for CPU-intensive processes');
      suggestions.push('Review application performance metrics');
      suggestions.push('Consider scaling resources');
    } else if (name.includes('memory')) {
      suggestions.push('Check for memory leaks');
      suggestions.push('Review memory usage patterns');
      suggestions.push('Consider increasing memory allocation');
    } else if (name.includes('disk')) {
      suggestions.push('Clean up old logs and temporary files');
      suggestions.push('Review disk usage by directory');
      suggestions.push('Consider expanding storage');
    } else {
      suggestions.push('Check system logs for details');
      suggestions.push('Review recent changes');
      suggestions.push('Contact the responsible team');
    }

    return {
      summary: `${alert.severity.toUpperCase()} alert detected: ${alert.alertName}`,
      rootCause: alert.description || 'Requires manual investigation',
      suggestions,
      relatedAlerts: [],
      confidence: 0.3,
      tags: [alert.severity, alert.labels.job || 'unknown', 'auto-generated'],
    };
  }

  /**
   * Check LLM service health with a minimal one-token round trip.
   * Returns false (never throws) when disabled or the request fails.
   */
  async checkHealth(): Promise<boolean> {
    if (!this.enabled || !this.client) {
      return false;
    }

    try {
      const response = await this.client.chat.completions.create({
        model: this.model,
        messages: [{ role: 'user', content: 'ping' }],
        max_tokens: 10,
      });

      return !!response.choices[0]?.message?.content;
    } catch (error) {
      logger.error('LLM health check failed:', error as Error);
      return false;
    }
  }

  /**
   * Report current configuration status for diagnostics endpoints.
   * `provider` is the configured base URL (or 'not-configured').
   */
  getStatus(): { enabled: boolean; provider: string; model: string } {
    return {
      enabled: this.enabled,
      provider: config.env.LLM_BASE_URL || 'not-configured',
      model: config.env.LLM_MODEL || 'not-configured',
    };
  }
}

export default new LLMService();
