#!/usr/bin/env python3
"""
Dashscope LLM Connection Manager for Fortran Code Commentator

This manager provides integration with Alibaba Cloud's Dashscope API
for faster model inference using qwen3-30b-a3b-instruct-2507.

Author: GSI Development Team
"""

import os
import json
import time
import logging
import requests
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Any

logger = logging.getLogger(__name__)


class DashscopeLLMManager:
    """
    Dashscope API connection manager optimized for speed.

    Uses Alibaba Cloud's Dashscope API (OpenAI-compatible mode) with the
    qwen3-30b-a3b-instruct-2507 model to generate unified diff patches that
    add/improve documentation in Fortran source files.
    """

    def __init__(self,
                 api_key: Optional[str] = None,
                 base_url: Optional[str] = None,
                 model_name: str = "qwen3-30b-a3b-instruct-2507",
                 llm_logger=None):
        """Initialize the manager and the shared HTTP session.

        Args:
            api_key: Dashscope API key; falls back to the DASHSCOPE_API_KEY
                environment variable.
            base_url: OpenAI-compatible endpoint; falls back to the
                DASHSCOPE_BASE_URL environment variable, then to the public
                compatible-mode endpoint.
            model_name: Model identifier sent with every request.
            llm_logger: Optional logger-like object (only ``.info`` is used)
                that receives structured operation records.

        Raises:
            ValueError: If no API key is supplied or found in the environment.
        """
        self.api_key = api_key or os.getenv('DASHSCOPE_API_KEY')
        self.base_url = base_url or os.getenv('DASHSCOPE_BASE_URL', 'https://dashscope.aliyuncs.com/compatible-mode/v1')
        self.model_name = model_name
        self.llm_logger = llm_logger

        if not self.api_key:
            raise ValueError("DASHSCOPE_API_KEY is required for Dashscope LLM manager")

        # One shared Session pools TCP connections and carries the auth
        # headers on every request.
        self.session = requests.Session()
        self.session.headers.update({
            'Authorization': f'Bearer {self.api_key}',
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        })

        logger.info(f"Dashscope LLM Manager initialized: {self.model_name} at {self.base_url}")
        self._log_llm_operation("CONNECTION_INIT", "SYSTEM", f"Connected to {self.model_name} at {self.base_url}", True)

    def _log_llm_operation(self, operation: str, file_path: str, details: str = "", success: bool = True) -> None:
        """Record one structured operation line on ``llm_logger``, if set."""
        if self.llm_logger:
            status = "SUCCESS" if success else "FAILED"
            self.llm_logger.info(f"DASHSCOPE_{operation} | {status} | {file_path} | {details}")

    def test_connection(self) -> bool:
        """Probe the Dashscope chat-completions endpoint with a tiny request.

        Returns:
            True when the endpoint answers HTTP 200, False otherwise
            (including network errors, which are logged, not raised).
        """
        try:
            response = self.session.post(
                f"{self.base_url}/chat/completions",
                json={
                    "model": self.model_name,
                    "messages": [{"role": "user", "content": "Hello"}],
                    "max_tokens": 10
                },
                timeout=30
            )
            success = response.status_code == 200

            if not success:
                # Best effort: decode the error body for a clearer log line.
                try:
                    error_detail = response.json()
                    error_msg = f"Status: {response.status_code}, Error: {error_detail}"
                    if response.status_code == 401:
                        logger.error("Dashscope API Key is invalid or expired. Please update DASHSCOPE_API_KEY in .env file")
                    elif response.status_code == 429:
                        logger.error("Dashscope API rate limit exceeded. Please try again later")
                    else:
                        logger.error(f"Dashscope API error: {error_msg}")
                except ValueError:
                    # Body was not JSON; fall back to the status code alone.
                    logger.error(f"Dashscope API error: Status {response.status_code}")

            self._log_llm_operation("CONNECTION_TEST", "SYSTEM", f"Status: {response.status_code}", success)
            return success
        except Exception as e:
            logger.error(f"Dashscope connection test failed: {e}")
            self._log_llm_operation("CONNECTION_TEST", "SYSTEM", f"Failed: {e}", False)
            return False

    def generate_diff_patch(self, prompt: str, fortran_code: str, file_path: str,
                           max_retries: int = 1) -> Optional[str]:
        """Generate a documentation diff patch for a Fortran file.

        Args:
            prompt: Base instruction prompt describing the commenting task.
            fortran_code: Full source text of the Fortran file.
            file_path: Path of the file (used in the prompt and for logging).
            max_retries: Total number of generation attempts before giving up.
                (Previously accepted but ignored; now honored. Default 1
                keeps the historical single-attempt behavior.)

        Returns:
            The validated diff patch text, or None when the API is
            unavailable, the file needs no changes, or every attempt fails.
        """
        if not self.test_connection():
            logger.warning("Dashscope API unavailable - cannot generate diff patch")
            return None

        attempts = max(1, max_retries)
        for attempt in range(1, attempts + 1):
            outcome, patch = self._attempt_diff_generation(prompt, fortran_code, file_path, attempt)
            # 'skip' means the model judged the file already documented:
            # that is a final answer, not a failure worth retrying.
            if outcome in ('success', 'skip'):
                return patch
            if attempt < attempts:
                logger.info(f"Retrying diff generation for {Path(file_path).name} "
                            f"(attempt {attempt + 1}/{attempts})")
        return None

    def _attempt_diff_generation(self, prompt: str, fortran_code: str, file_path: str,
                                 attempt: int) -> Tuple[str, Optional[str]]:
        """Run one generation attempt against the API.

        Returns:
            ('success', patch) for a validated patch,
            ('skip', None) when the file is already well documented,
            ('failed', None) on any error (the caller may retry).
        """
        file_name = Path(file_path).name
        start_time = time.time()
        try:
            # Build the full prompt for diff generation.
            full_prompt = self._create_diff_prompt(prompt, file_path, fortran_code)

            payload = {
                "model": self.model_name,
                "messages": [
                    {"role": "user", "content": full_prompt}
                ],
                # Low temperature keeps the diff output as deterministic as
                # the service allows.
                "max_tokens": 4096,
                "temperature": 0.1,
                "top_p": 0.9
            }

            logger.info(f"Generating diff patch for {file_name} using Dashscope")
            self._log_llm_operation("DIFF_REQUEST", file_name,
                                  f"Model: {self.model_name}, timeout: 60s", True)

            response = self.session.post(
                f"{self.base_url}/chat/completions",
                json=payload,
                timeout=60
            )

            response_time = time.time() - start_time

            if response.status_code != 200:
                error_msg = f"API error {response.status_code}: {response.text}"
                logger.error(error_msg)
                self._log_llm_operation("DIFF_FAILED", file_name, error_msg, False)
                return 'failed', None

            result = response.json()
            if 'choices' not in result or len(result['choices']) == 0:
                logger.warning(f"No response choices in result: {result}")
                self._log_llm_operation("DIFF_FAILED", file_name, "No choices in response", False)
                return 'failed', None

            generated_content = result['choices'][0]['message']['content'].strip()

            # An (effectively) empty answer means the file is already well
            # documented; report it as a skip rather than a failure.
            if not generated_content or generated_content.lower() in ['empty', 'none', 'no changes needed']:
                logger.info(f"File already well documented, no changes needed: {file_name}")
                self._log_llm_operation("ALREADY_DOCUMENTED", file_name,
                                      "File already has comprehensive documentation", True)
                return 'skip', None

            # Log the raw response before cleaning.
            self._log_llm_operation("RAW_RESPONSE", file_name,
                                  f"Length: {len(generated_content)} chars", True)
            self._log_detailed_response(generated_content, file_name, attempt)

            # Strip markdown fences / preamble and normalize hunk lines.
            generated_content = self._clean_markdown_wrapper(generated_content)

            # Log the cleaned response.
            self._log_llm_operation("CLEANED_RESPONSE", file_name,
                                  f"Length: {len(generated_content)} chars", True)
            self._log_detailed_response(generated_content, file_name, attempt, prefix="CLEANED")

            is_valid, validation_issues = self._validate_diff_response(generated_content, file_path)
            if is_valid:
                logger.info(f"Successfully generated diff patch in {response_time:.2f}s")
                self._log_llm_operation("DIFF_SUCCESS", file_name,
                                      f"Generated in {response_time:.2f}s", True)
                return 'success', generated_content

            logger.warning(f"Generated diff failed validation: {validation_issues}")
            self._log_llm_operation("DIFF_VALIDATION_FAILED", file_name,
                                  f"{'; '.join(validation_issues)}", False)
            return 'failed', None

        except requests.exceptions.Timeout:
            response_time = time.time() - start_time
            logger.warning(f"Request timeout after {response_time:.2f}s")
            self._log_llm_operation("DIFF_TIMEOUT", file_name,
                                  f"Timeout after {response_time:.2f}s", False)
            return 'failed', None

        except Exception as e:
            logger.error(f"Error generating diff patch: {e}")
            self._log_llm_operation("DIFF_ERROR", file_name, str(e), False)
            return 'failed', None

    def _create_diff_prompt(self, base_prompt: str, file_path: str, fortran_code: str) -> str:
        """Compose the full diff-generation prompt for one Fortran file.

        The diff headers in the template use the file's base name so the
        model emits ``--- a/<name>`` / ``+++ b/<name>`` lines that match the
        file being patched.
        """
        filename = Path(file_path).name

        prompt = f"""{base_prompt}

CRITICAL INSTRUCTIONS - READ THE EXISTING CODE FIRST:

Original file path: {file_path}

Original Fortran code:
```fortran
{fortran_code}
```

**ANALYSIS REQUIRED**:
1. **EXAMINE existing comments** - Does this file already have comprehensive documentation?
2. **IF WELL DOCUMENTED**: Generate NO diff (return empty) or minimal targeted fixes only
3. **IF POORLY DOCUMENTED**: Generate diff to add missing documentation
4. **IF MIXED**: Edit existing comments using - (remove) and + (replace/improve)

**FORBIDDEN CONTENT**: 
- @author, @date, @version tags (ABSOLUTELY NEVER generate these)
- Redundant/duplicate comment blocks
- Adding comments where comprehensive ones already exist

EXACT DIFF FORMAT REQUIRED:
--- a/{filename}
+++ b/{filename}
@@ -start,count +start,count @@
 context line (unchanged - starts with space)
-old comment line to remove (starts with -)
+improved comment line (starts with +)
+new comment line (starts with +)
 original code line (unchanged - starts with space)

VALIDATION RULES:
- Every line after @@ header MUST start with exactly one of: space, +, or -
- Use + for added/improved lines
- Use - for removed/old lines  
- Use space for unchanged context lines
- Include 3-5 context lines around each change
- NO explanatory text before or after the diff

Return ONLY the diff patch starting with --- line, or EMPTY if file is already well documented."""

        return prompt

    def _clean_markdown_wrapper(self, content: str) -> str:
        """Strip markdown fences/preamble and normalize diff hunk lines.

        Three passes:
        1. Remove a surrounding ``` fenced block (any info string, including
           ```diff) by dropping the first and last fence lines.
        2. Drop any explanatory text before the first ``---`` header.
        3. Inside hunks, turn empty or unprefixed lines into context lines
           (leading space) so the diff stays machine-applicable.
        """
        content = content.strip()

        # Pass 1: remove a surrounding markdown code fence.
        if content.startswith('```') and content.endswith('```'):
            lines = content.split('\n')
            if len(lines) >= 2:
                content = '\n'.join(lines[1:-1])

        # Pass 2: find the start of the actual diff content (--- line).
        lines = content.split('\n')
        diff_start = -1
        for i, line in enumerate(lines):
            if line.startswith('--- a/') or line.startswith('--- '):
                diff_start = i
                break

        if diff_start >= 0:
            content = '\n'.join(lines[diff_start:])

        # Pass 3: fix empty lines in diff hunks - add space prefix so they
        # read as context lines.
        lines = content.split('\n')
        cleaned_lines = []
        in_hunk = False

        for line in lines:
            if line.startswith('---') or line.startswith('+++'):
                cleaned_lines.append(line)
                continue
            elif line.startswith('@@'):
                in_hunk = True
                cleaned_lines.append(line)
                continue
            elif not in_hunk:
                cleaned_lines.append(line)
                continue

            # We're in a hunk - fix line prefixes.
            if line == '' or (line and line[0] not in [' ', '+', '-']):
                # Empty line or line without proper prefix - make it a context line.
                cleaned_lines.append(' ' + line.lstrip())
            else:
                cleaned_lines.append(line)

        return '\n'.join(cleaned_lines).strip()

    def _validate_diff_response(self, content: str, file_path: str) -> Tuple[bool, List[str]]:
        """Check that ``content`` looks like a well-formed unified diff.

        Returns:
            (is_valid, errors): is_valid is True only when no structural
            problem was found; errors lists every problem detected.
        """
        errors: List[str] = []

        if not content.strip():
            errors.append("Empty response")
            return False, errors

        lines = content.split('\n')

        # Required file headers.
        if not any(line.startswith('--- a/') for line in lines):
            errors.append("Missing '--- a/filename' header")

        if not any(line.startswith('+++ b/') for line in lines):
            errors.append("Missing '+++ b/filename' header")

        # At least one hunk header.
        if not any(line.startswith('@@') for line in lines):
            errors.append("No valid hunk headers found")

        # Every non-empty line inside a hunk must carry a diff prefix.
        invalid_lines = []
        in_hunk = False
        for i, line in enumerate(lines):
            if line.startswith('@@'):
                in_hunk = True
                continue

            if in_hunk and line:
                prefix = line[0]
                if prefix not in [' ', '+', '-']:
                    invalid_lines.append(i + 1)

        if invalid_lines:
            errors.append(f"Invalid diff content lines: {len(invalid_lines)} lines don't start with +, -, or space")

        # The patch should actually introduce structured documentation.
        has_structured_comments = any('@brief' in line or '@details' in line or '@param' in line
                                    for line in lines)
        if not has_structured_comments:
            errors.append("Generated diff appears to lack structured comments (@brief, @details, etc.)")

        return len(errors) == 0, errors

    def _log_detailed_response(self, content: str, file_path: str, attempt: int, prefix: str = "RAW") -> None:
        """Dump the response line-by-line to ``llm_logger`` for debugging.

        Output is capped at the first 50 lines; each line is shown with its
        diff prefix character and repr() so whitespace issues are visible.
        """
        if not self.llm_logger:
            return

        # Log a separator and header.
        self.llm_logger.info(f"=== DASHSCOPE {prefix} RESPONSE - {file_path} - Attempt {attempt} ===")

        # Log the content with line numbers for easier analysis.
        lines = content.split('\n')
        for i, line in enumerate(lines[:50], 1):  # Limit to first 50 lines
            prefix_char = line[0] if line else '(empty)'
            self.llm_logger.info(f"L{i:02d}: [{prefix_char}] {repr(line)}")

        if len(lines) > 50:
            self.llm_logger.info(f"... truncated {len(lines) - 50} more lines")

        self.llm_logger.info(f"=== END DASHSCOPE {prefix} RESPONSE - Total {len(lines)} lines ===\n")