#!/usr/bin/env python3
"""
Main Code Commentary System for Fortran Code Commentator

Coordinates all components to process Fortran source files and add
comprehensive, AI-generated comments following structured formats.
"""

import os
import shutil
import time
import json
import hashlib
import logging
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Any
from dotenv import load_dotenv

from .models import ProcessingTask, ProcessingResults, PatchError, DiffAttempt, PromptConfig, ValidationResult
from ..processors.file import FileProcessor
from ..processors.classifier import CodeClassifier
from ..processors.diff import DiffPatchProcessor
from ..processors.retry import DiffRetryManager
from ..managers.prompt import PromptManager
from ..managers.llm import LLMConnectionManager
from ..managers.dashscope_llm import DashscopeLLMManager
from ..utils.parallel import ParallelProcessingEngine

logger = logging.getLogger(__name__)


class CodeCommentarySystem:
    """Main git diff-based automated code commentary system"""
    
    def __init__(self, source_path: str, config: Any, 
                 ollama_url: str = os.getenv('OLLAMA_HOST', 'http://60.245.128.27:11434'),
                 model_name: str = os.getenv('OLLAMA_MODEL', 'hopephoto/qwen3-30b-a3b-instruct_q8:latest'),
                 use_fast: bool = False, fast_only: bool = False):
        
        # Load environment variables from .env file
        load_dotenv()
        
        self.source_path = source_path
        self.config = config
        self.use_fast = use_fast
        self.fast_only = fast_only
        self.config_dir = getattr(config, 'config_dir', './config')
        
        # Initialize components
        self.file_processor = FileProcessor()
        self.classifier = CodeClassifier(source_path)
        self.prompt_manager = PromptManager(self.config_dir)
        
        # Setup LLM logging first
        self.llm_log_file = Path(self.config_dir) / "llm_operations.log"
        self._setup_llm_logging()
        
        # Initialize LLM managers - NO FALLBACK when fast is requested
        if use_fast:
            logger.info("Using Dashscope API for fast processing (NO fallback - report errors directly)")
            self.primary_llm_manager = DashscopeLLMManager(llm_logger=self.llm_logger)
            self.fallback_llm_manager = None  # No fallback for fast mode
            self.ollama_manager = self.primary_llm_manager  # For backward compatibility
        else:
            logger.info("Using Ollama for standard processing")
            self.primary_llm_manager = LLMConnectionManager(ollama_url, model_name, llm_logger=self.llm_logger)
            self.fallback_llm_manager = None
            self.ollama_manager = self.primary_llm_manager  # For backward compatibility
        # Create backup directory for this session
        self.backup_dir = Path(self.config_dir) / "backups" / time.strftime("%Y%m%d_%H%M%S")
        self.diff_processor = DiffPatchProcessor(backup_dir=str(self.backup_dir))
        self.retry_manager = DiffRetryManager()
        
        # Enhanced parallel processing engine
        max_workers = getattr(config, 'max_workers', 4)
        self.parallel_engine = ParallelProcessingEngine(worker_count=max_workers)
        logger.info(f"Initialized parallel processing engine with {max_workers} workers")
        
        # Processing status tracking
        self.status_file = Path(self.config_dir) / "processing_status.json"
        
        # Clear processing status if retry flag is set
        retry_flag = getattr(config, 'retry_processed', False)
        if retry_flag:
            logger.info("Retry flag set - clearing processing status cache")
            if self.status_file.exists():
                self.status_file.unlink()
                logger.info(f"Cleared processing status file: {self.status_file}")
        
        self.processed_files = self._load_processing_status()
    
    def discover_files(self) -> List[str]:
        """Discover Fortran files for processing"""
        return self.discover_fortran_files()
        
        # Processing statistics
        self.stats = {
            'total_files': 0,
            'successful_files': 0,
            'failed_files': 0,
            'processing_time': 0.0,
            'worker_stats': {},
            'load_balance_info': {}
        }
    
    def discover_fortran_files(self) -> List[str]:
        """Enhanced Fortran file discovery with comprehensive search"""
        gsi_path = Path(self.source_path)
        
        # Define search paths in priority order
        search_paths = [
            str(gsi_path / "gsi_classification"),  # Pre-classified files (highest priority)
            str(gsi_path / "gsi"),                 # Main GSI source
            str(gsi_path / "enkf")                 # EnKF source
        ]
        
        # Use enhanced classifier discovery
        discovered_files = self.classifier.discover_fortran_files(search_paths)
        
        # Prioritize pre-classified files, then others
        all_files = []
        all_files.extend(discovered_files.get('gsi_classification', []))
        all_files.extend(discovered_files.get('gsi_main', []))
        all_files.extend(discovered_files.get('enkf_main', []))
        
        # Remove duplicates while preserving order
        seen = set()
        unique_files = []
        for file_path in all_files:
            if file_path not in seen:
                seen.add(file_path)
                unique_files.append(file_path)
        
        logger.info(f"Discovered {len(unique_files)} unique Fortran files")
        return unique_files
    
    def create_processing_tasks(self, files: List[str]) -> Tuple[List[ProcessingTask], Dict[str, Any]]:
        """Create processing tasks with enhanced filtering and validation"""
        tasks = []
        
        # Filter files by size with detailed statistics
        filtered_files, filter_stats = self.file_processor.filter_files_by_size(files)
        
        task_creation_stats = {
            'total_input_files': len(files),
            'filtered_files': len(filtered_files),
            'created_tasks': 0,
            'classification_distribution': {},
            'priority_distribution': {},
            'failed_classifications': []
        }
        
        for file_path in filtered_files:
            try:
                # Get file info
                file_info = self.file_processor.get_file_info(file_path)
                if not file_info['is_processable']:
                    continue
                
                # Classify file
                classification = self.classifier.classify_file(file_path)
                
                # Filter by target classification if specified
                target_classification = getattr(self.config, 'target_classification', 'auto')
                if target_classification != 'auto' and classification != target_classification:
                    continue
                
                if classification not in task_creation_stats['classification_distribution']:
                    task_creation_stats['classification_distribution'][classification] = 0
                task_creation_stats['classification_distribution'][classification] += 1
                
                # Load prompts for classification
                prompt_config = self.prompt_manager.load_prompts(classification)
                
                # Calculate priority
                priority = self._calculate_priority(classification, file_info['line_count'])
                if priority not in task_creation_stats['priority_distribution']:
                    task_creation_stats['priority_distribution'][priority] = 0
                task_creation_stats['priority_distribution'][priority] += 1
                
                # Create task
                task = ProcessingTask(
                    file_path=file_path,
                    classification=classification,
                    line_count=file_info['line_count'],
                    prompt_config=prompt_config,
                    priority=priority
                )
                tasks.append(task)
                task_creation_stats['created_tasks'] += 1
                
            except Exception as e:
                error_msg = f"Failed to create task for {file_path}: {e}"
                logger.error(error_msg)
                task_creation_stats['failed_classifications'].append((file_path, str(e)))
        
        # Sort by priority (higher priority first)
        tasks.sort(key=lambda x: x.priority, reverse=True)
        
        # Log task creation statistics
        logger.info(f"Task creation completed:")
        logger.info(f"  Input files: {task_creation_stats['total_input_files']}")
        logger.info(f"  Filtered files: {task_creation_stats['filtered_files']}")
        logger.info(f"  Created tasks: {task_creation_stats['created_tasks']}")
        logger.info(f"  Classification distribution: {task_creation_stats['classification_distribution']}")
        
        # Combine statistics
        combined_stats = {
            'filtering': filter_stats,
            'task_creation': task_creation_stats
        }
        
        return tasks, combined_stats
    
    def _calculate_priority(self, classification: str, line_count: int) -> int:
        """Calculate processing priority"""
        base_priority = {
            "core_analysis": 100,
            "background_grid": 80,
            "observation_processing": 70,
            "io_interface": 60,
            "utilities": 50
        }.get(classification, 50)
        
        # Smaller files get higher priority
        size_bonus = max(0, 100 - (line_count // 10))
        return base_priority + size_bonus
    
    def process_single_file(self, task: ProcessingTask) -> Tuple[bool, str]:
        """Process a single file using git diff approach"""
        attempts = []
        
        try:
            # Read file content
            content = self.file_processor.read_file_content(task.file_path)
            if content is None:
                return False, "Failed to read file"
            
            # Create full prompt for diff generation
            full_prompt = self._create_diff_prompt(task.prompt_config, task.classification)
            
            # Attempt diff generation and application with enhanced retry logic
            for attempt in range(1, self.retry_manager.max_attempts + 1):
                attempt_start_time = time.strftime("%Y-%m-%d %H:%M:%S")
                attempt_processing_start = time.time()
                
                # Calculate and apply backoff delay (except for first attempt)
                if attempt > 1 and attempts:
                    last_error_type = attempts[-1].error.error_type if attempts[-1].error else "UNKNOWN_ERROR"
                    backoff_time = self.retry_manager.calculate_backoff_time(attempt, last_error_type)
                    logger.info(f"Applying backoff delay: {backoff_time:.2f}s for {last_error_type}")
                    time.sleep(backoff_time)
                else:
                    backoff_time = 0.0
                
                try:
                    # Generate diff patch
                    if attempt == 1:
                        # First attempt - use normal prompt with fallback
                        diff_patch = self._generate_diff_with_fallback(
                            full_prompt, task.file_path, content, task.classification
                        )
                    else:
                        # Retry with error correction and fallback
                        last_attempt = attempts[-1]
                        correction_prompt = self.retry_manager.generate_correction_prompt(
                            last_attempt.patch_content, last_attempt.error, content, task.file_path
                        )
                        diff_patch = self._generate_diff_with_fallback(
                            correction_prompt, task.file_path, content, task.classification
                        )
                    
                    if diff_patch is None:
                        error = PatchError("GENERATION_FAILED", "LLM failed to generate diff", context="No response from model", 
                                         severity="HIGH", recovery_strategy="PROMPT_SIMPLIFICATION")
                        processing_duration = time.time() - attempt_processing_start
                        attempts.append(DiffAttempt(attempt, "", error, False, attempt_start_time, backoff_time, processing_duration))
                        
                        # Check if we should continue retrying based on enhanced logic
                        if not self.retry_manager.should_retry(attempt, error):
                            logger.warning(f"Stopping retries for {task.file_path}: {error.severity} severity {error.error_type}")
                            break
                        continue
                    
                    # Validate diff syntax
                    validation = self.diff_processor.validate_diff_format(diff_patch)
                    if not validation.is_valid:
                        error_msg = "; ".join(validation.errors)
                        severity = "LOW" if any(warning in error_msg.lower() for warning in ["header", "whitespace"]) else "MEDIUM"
                        error = PatchError("SYNTAX_ERROR", error_msg, context="Invalid diff format", 
                                         severity=severity, recovery_strategy="FORMAT_CORRECTION")
                        processing_duration = time.time() - attempt_processing_start
                        attempts.append(DiffAttempt(attempt, diff_patch, error, False, attempt_start_time, backoff_time, processing_duration))
                        
                        # Check enhanced retry logic
                        if not self.retry_manager.should_retry(attempt, error):
                            logger.warning(f"Stopping retries for {task.file_path}: {error.severity} severity validation failure")
                            break
                        
                        logger.warning(f"Diff validation failed (attempt {attempt}): {error_msg}")
                        continue
                    
                    # Apply patch
                    app_result = self.diff_processor.apply_patch(diff_patch, task.file_path)
                    if not app_result.success:
                        # Analyze error for better correction using enhanced analysis
                        error = self.retry_manager.analyze_patch_error(app_result.error_message, diff_patch)
                        processing_duration = time.time() - attempt_processing_start
                        attempts.append(DiffAttempt(attempt, diff_patch, error, False, attempt_start_time, backoff_time, processing_duration))
                        
                        # Enhanced retry decision
                        if not self.retry_manager.should_retry(attempt, error):
                            logger.warning(f"Stopping retries for {task.file_path}: {error.severity} severity {error.error_type}")
                            break
                        
                        logger.warning(f"Patch application failed (attempt {attempt}): {error.error_type} - {error.error_message}")
                        continue
                    
                    # Validate Fortran syntax after successful patch
                    if not self.diff_processor.validate_fortran_syntax(task.file_path):
                        # Restore from backup
                        if app_result.backup_path and os.path.exists(app_result.backup_path):
                            shutil.copy2(app_result.backup_path, task.file_path)
                        
                        error = PatchError("SYNTAX_VALIDATION", "Fortran syntax invalid after patch", context="Syntax check failed",
                                         severity="HIGH", recovery_strategy="ROLLBACK_CORRECTION")
                        processing_duration = time.time() - attempt_processing_start
                        attempts.append(DiffAttempt(attempt, diff_patch, error, False, attempt_start_time, backoff_time, processing_duration))
                        
                        if not self.retry_manager.should_retry(attempt, error):
                            logger.warning(f"Stopping retries for {task.file_path}: Syntax validation consistently failing")
                            break
                        
                        logger.warning(f"Syntax validation failed (attempt {attempt}), restored from backup")
                        continue
                    
                    # Success!
                    processing_duration = time.time() - attempt_processing_start
                    attempts.append(DiffAttempt(attempt, diff_patch, None, True, attempt_start_time, backoff_time, processing_duration))
                    logger.info(f"Successfully processed: {task.file_path} (attempt {attempt}, {processing_duration:.2f}s)")
                    return True, "Success"
                    
                except Exception as e:
                    error = PatchError("PROCESSING_ERROR", str(e), context="Unexpected error during processing",
                                     severity="HIGH", recovery_strategy="ENVIRONMENT_RESET")
                    processing_duration = time.time() - attempt_processing_start
                    attempts.append(DiffAttempt(attempt, diff_patch if 'diff_patch' in locals() else "", error, False, attempt_start_time, backoff_time, processing_duration))
                    logger.error(f"Processing error on attempt {attempt}: {e}")
                    
                    if not self.retry_manager.should_retry(attempt, error):
                        logger.error(f"Stopping retries for {task.file_path}: Processing errors indicate system issues")
                        break
                    continue
            
            # All attempts failed
            self.retry_manager.log_final_failure(task.file_path, attempts)
            return False, f"Failed after {len(attempts)} attempts"
            
        except Exception as e:
            error_msg = f"Error processing {task.file_path}: {e}"
            logger.error(error_msg)
            return False, error_msg
    
    def _create_full_prompt(self, prompt_config: PromptConfig, classification: str) -> str:
        """Create full prompt from configuration"""
        prompt_parts = [
            "You are an expert Fortran code documentation assistant.",
            "Add comprehensive comments to the following Fortran code using the structured format.",
            "",
            "STRUCTURED COMMENT FORMAT:",
            prompt_config.comment_standard.subroutine_template if prompt_config.comment_standard else "",
            "",
            "CLASSIFICATION GUIDANCE:",
            prompt_config.classification_specific,
            "",
            "BACKGROUND CONTEXT:",
            prompt_config.background_context,
            "",
            "INSTRUCTIONS:",
            "1. Add structured comments to all subroutines and functions",
            "2. Use the @brief, @details, @param, @return, @usage, @algorithm tags as appropriate",
            "3. Include mathematical notation for algorithms where relevant",
            "4. Focus on practical implementation details",
            "5. Ensure comments are compatible with Sphinx/Doxygen documentation generators",
            "6. Preserve all original code structure and logic",
            "7. Return ONLY the commented Fortran code, no additional text or explanations"
        ]
        
        return "\n".join(filter(None, prompt_parts))
    
    def _create_diff_prompt(self, prompt_config: PromptConfig, classification: str) -> str:
        """Create git diff-specific prompt with integrated structured comment standard"""
        
        # Get classification-specific template if available
        template_info = self.prompt_manager.classification_templates.get(classification, {})
        focus_prompt = template_info.get("focus_prompt", "")
        example_context = template_info.get("example_context", "")
        
        # Build comprehensive prompt with structured templates
        prompt_parts = [
            "You are an expert Fortran code documentation assistant for GSI/EnKF systems.",
            "Generate ONLY a unified diff format patch that adds structured comments to Fortran code.",
            "",
            "STRUCTURED COMMENT TEMPLATES:",
            "For subroutines use:",
            prompt_config.comment_standard.subroutine_template if prompt_config.comment_standard else "",
            "",
            "For functions use:",
            prompt_config.comment_standard.function_template if prompt_config.comment_standard else "",
            "",
            "For modules use:",
            prompt_config.comment_standard.module_template if prompt_config.comment_standard else "",
            "",
            f"CLASSIFICATION: {classification.upper()}",
            focus_prompt,
            "",
            "TECHNICAL CONTEXT:",
            f"Example contexts: {example_context}" if example_context else "",
            prompt_config.background_context,
            "",
            "CRITICAL DIFF REQUIREMENTS:",
            "1. Use exact unified diff format: --- a/filename +++ b/filename",
            "2. Include proper @@ hunk headers with accurate line numbers",
            "3. Add structured comments using the templates above",
            "4. Use standard tags: @brief, @details, @param[in/out/inout], @return, @usage, @algorithm, @see",
            "5. Preserve ALL original Fortran code exactly - no modifications to logic",
            "6. Include 3-5 unchanged context lines around each change",
            "7. Add comments immediately before subroutine/function/module declarations",
            "8. Use appropriate GSI/EnKF terminology and mathematical notation",
            "9. Return ONLY the diff patch with no additional text or explanations",
            "",
            "FOCUS AREAS FOR THIS CLASSIFICATION:",
            f"- {chr(10).join(prompt_config.focus_areas)}" if prompt_config.focus_areas else "",
            "",
            "BASE FORMATTING INSTRUCTIONS:",
            prompt_config.base_prompt if prompt_config.base_prompt else ""
        ]
        
        return "\n".join(filter(None, prompt_parts))
    
    def _create_classification_specific_diff_template(self, classification: str, 
                                                    fortran_code: str) -> str:
        """Create a classification-specific diff template example"""
        template_examples = {
            "background_grid": "Focus on grid operations and spatial transformations",
            "core_analysis": "Focus on data assimilation algorithms and optimization",
            "io_interface": "Focus on file I/O operations and data management",
            "observation_processing": "Focus on observation operators and quality control",
            "utilities": "Focus on utility functions and support operations"
        }
        return template_examples.get(classification, "Focus on general Fortran functionality")
    
    def _load_processing_status(self) -> Dict[str, Dict]:
        """Load processing status from file"""
        if self.status_file.exists():
            try:
                with open(self.status_file, 'r') as f:
                    return json.load(f)
            except Exception as e:
                logger.warning(f"Failed to load processing status: {e}")
        return {}
    
    def _save_processing_status(self):
        """Save processing status to file"""
        try:
            self.status_file.parent.mkdir(parents=True, exist_ok=True)
            with open(self.status_file, 'w') as f:
                json.dump(self.processed_files, f, indent=2)
        except Exception as e:
            logger.error(f"Failed to save processing status: {e}")
    
    def _get_file_checksum(self, file_path: str) -> str:
        """Calculate MD5 checksum of file"""
        try:
            with open(file_path, 'rb') as f:
                return hashlib.md5(f.read()).hexdigest()
        except Exception as e:
            logger.error(f"Failed to calculate checksum for {file_path}: {e}")
            return ""
    
    def _is_file_already_processed(self, file_path: str) -> bool:
        """Check if file has already been successfully processed"""
        # If retry flag is set, always reprocess
        retry_flag = getattr(self.config, 'retry_processed', False)
        if retry_flag:
            logger.info(f"Retry flag set - reprocessing file: {file_path}")
            return False
            
        if file_path not in self.processed_files:
            return False
            
        file_info = self.processed_files[file_path]
        current_checksum = self._get_file_checksum(file_path)
        
        # Check if file has been modified since last processing
        if file_info.get('checksum') != current_checksum:
            logger.info(f"File modified since last processing: {file_path}")
            return False
            
        # Check if last processing was successful
        if file_info.get('status') == 'success':
            logger.info(f"Skipping already processed file: {file_path}")
            return True
            
        return False
    
    def compare_line_counts(self) -> int:
        """Compare line counts between original files and their commented versions.
        
        Returns:
            int: 0 for success, 1 for failure
        """
        try:
            # Discover files using the classifier
            files = self.discover_files()
            if not files:
                print("No files found to compare")
                return 1
                
            tasks, _ = self.create_processing_tasks(files)
            
            if not tasks:
                print("No valid processing tasks created")
                return 1
            
            print(f"Comparing line counts for {len(tasks)} files:")
            print("=" * 80)
            
            total_original_lines = 0
            total_current_lines = 0
            processed_count = 0
            not_processed_count = 0
            
            for task in tasks:
                file_path = task.file_path
                
                # Check if file has been processed
                if file_path not in self.processed_files:
                    not_processed_count += 1
                    continue
                
                file_info = self.processed_files[file_path]
                if file_info.get('status') != 'success':
                    not_processed_count += 1
                    continue
                
                # Count current lines
                try:
                    with open(file_path, 'r', encoding='utf-8') as f:
                        current_lines = len(f.readlines())
                except Exception as e:
                    logger.warning(f"Could not read {file_path}: {e}")
                    continue
                
                # Get original line count from backup or estimate
                original_lines = self._get_original_line_count(file_path)
                
                if original_lines > 0:
                    lines_added = current_lines - original_lines
                    percentage = (lines_added / original_lines) * 100 if original_lines > 0 else 0
                    
                    # Format the output
                    status = "+" if lines_added > 0 else "=" if lines_added == 0 else "-"
                    print(f"{file_path:60} | {original_lines:4d} -> {current_lines:4d} | {status}{abs(lines_added):3d} ({percentage:+5.1f}%)")
                    
                    total_original_lines += original_lines
                    total_current_lines += current_lines
                    processed_count += 1
            
            # Print summary
            print("=" * 80)
            total_added = total_current_lines - total_original_lines
            overall_percentage = (total_added / total_original_lines) * 100 if total_original_lines > 0 else 0
            
            print(f"SUMMARY:")
            print(f"  Processed files: {processed_count}")
            print(f"  Not processed: {not_processed_count}")
            print(f"  Total original lines: {total_original_lines:,}")
            print(f"  Total current lines: {total_current_lines:,}")
            print(f"  Lines added: +{total_added:,} ({overall_percentage:+5.1f}%)")
            
            if processed_count == 0:
                print("No successfully processed files found to compare")
                return 1
                
            return 0
            
        except Exception as e:
            logger.error(f"Error in compare mode: {e}")
            print(f"Error: {e}")
            return 1
    
    def _get_original_line_count(self, file_path: str) -> int:
        """Get original line count from backup files or estimate from current size"""
        try:
            # First try to find a backup file
            backup_pattern = f"{Path(file_path).name}.*.backup"
            possible_backup_dirs = [
                self.backup_dir,
                Path(self.config_dir) / "backups",
                Path(file_path).parent / "backups"
            ]
            
            backup_file = None
            for backup_dir in possible_backup_dirs:
                if backup_dir and backup_dir.exists():
                    backups = list(backup_dir.glob(backup_pattern))
                    if backups:
                        # Get the oldest backup (closest to original)
                        backup_file = sorted(backups, key=lambda x: x.stat().st_mtime)[0]
                        break
            
            if backup_file and backup_file.exists():
                with open(backup_file, 'r', encoding='utf-8') as f:
                    return len(f.readlines())
            
            # If no backup, estimate based on processing history
            if file_path in self.processed_files:
                file_info = self.processed_files[file_path]
                # Try to estimate original size (rough approximation)
                # This is not perfect but gives some indication
                with open(file_path, 'r', encoding='utf-8') as f:
                    current_lines = len(f.readlines())
                
                # Assume roughly 20-30% increase from comments as rough estimate
                estimated_original = int(current_lines / 1.25)  # Assume 25% increase
                return estimated_original
                
        except Exception as e:
            logger.warning(f"Could not determine original line count for {file_path}: {e}")
            
        return 0
    
    def _process_task_with_status_check(self, task: ProcessingTask) -> Tuple[bool, str, bool]:
        """
        Process a single task with status checking
        Returns: (success, message, was_skipped)
        """
        # Check if file is already processed
        if self._is_file_already_processed(task.file_path):
            return True, "Already processed", True
        
        # Process the file
        success, message = self.process_single_file(task)
        
        # Mark file as processed
        self._mark_file_processed(task.file_path, success, message)
        
        # Log the operation
        if success:
            self._log_llm_operation("FILE_PROCESSED", task.file_path, "Successfully added comments", True)
        else:
            self._log_llm_operation("FILE_FAILED", task.file_path, f"Processing failed: {message}", False)
        
        return success, message, False
    
    def _generate_diff_with_fallback(self, prompt: str, file_path: str, file_content: str, classification: str) -> Optional[str]:
        """Generate a diff using the primary LLM only; failures surface as ``None``.

        Despite the name, no fallback model is consulted — errors are logged to
        the LLM operations log and reported directly.
        """
        try:
            patch = self.primary_llm_manager.generate_diff_patch(prompt, file_content, file_path)
            if not patch:
                # The model returned nothing usable; record and give up.
                logger.error(f"LLM failed to generate valid diff for {file_path}")
                self._log_llm_operation("GENERATION_FAILED", Path(file_path).name, 
                                      "LLM failed to generate valid diff", False)
                return None
            return patch
        except Exception as e:
            logger.error(f"Error in LLM generation: {e}")
            self._log_llm_operation("GENERATION_ERROR", Path(file_path).name, f"Error: {e}", False)
            return None
    
    def _mark_file_processed(self, file_path: str, success: bool, message: str = ""):
        """Record the processing outcome for a file and persist the status map."""
        entry = {
            'status': 'success' if success else 'failed',
            'timestamp': time.strftime("%Y-%m-%d %H:%M:%S"),
            'checksum': self._get_file_checksum(file_path),
            'message': message,
        }
        self.processed_files[file_path] = entry
        # Flush the updated status map to disk immediately.
        self._save_processing_status()
    
    def _setup_llm_logging(self):
        """Setup dedicated LLM operations logging"""
        self.llm_logger = logging.getLogger('llm_operations')
        self.llm_logger.setLevel(logging.INFO)
        
        # Remove existing handlers
        for handler in self.llm_logger.handlers[:]:
            self.llm_logger.removeHandler(handler)
        
        # Create file handler for LLM operations
        llm_handler = logging.FileHandler(self.llm_log_file)
        llm_handler.setLevel(logging.INFO)
        
        # Create formatter
        formatter = logging.Formatter(
            '%(asctime)s - %(levelname)s - %(message)s'
        )
        llm_handler.setFormatter(formatter)
        
        self.llm_logger.addHandler(llm_handler)
        self.llm_logger.propagate = False  # Don't propagate to parent logger
        
        self.llm_logger.info("=== LLM Operations Log Started ===")
    
    def _log_llm_operation(self, operation: str, file_path: str, details: str = "", success: bool = True):
        """Log LLM operation details"""
        status = "SUCCESS" if success else "FAILED"
        self.llm_logger.info(f"{operation} | {status} | {file_path} | {details}")
    
    def process_files(self) -> ProcessingResults:
        """Discover files, build tasks, process them, and return aggregate results.

        Files already marked as processed are skipped but still counted as
        successful. Sequential or thread-pool execution is chosen from
        ``config.max_workers``.

        Returns:
            ProcessingResults with per-file outcomes and summary statistics.
        """
        # NOTE: the module-level `time` import is used; the previous local
        # `import time` was redundant and has been removed.
        start_time = time.time()

        # Discover files
        files = self.discover_files()
        if not files:
            logger.warning("No files discovered for processing")
            return ProcessingResults(
                successful_files=[],
                failed_files=[],
                total_files=0,
                processing_time=0.0,
                statistics={}
            )

        # Create processing tasks
        tasks, task_stats = self.create_processing_tasks(files)
        if not tasks:
            logger.warning("No processing tasks created")
            return ProcessingResults(
                successful_files=[],
                failed_files=[],
                total_files=0,
                processing_time=0.0,
                statistics=task_stats
            )

        max_workers = getattr(self.config, 'max_workers', 4)
        logger.info(f"Starting to process {len(tasks)} tasks with {max_workers} workers")

        if max_workers == 1:
            successful_files, failed_files, skipped_files = self._execute_tasks_sequential(tasks)
        else:
            successful_files, failed_files, skipped_files = self._execute_tasks_parallel(tasks, max_workers)

        processing_time = time.time() - start_time

        # Set lookup keeps the 'actually_processed' filter O(n) instead of O(n^2).
        skipped_set = set(skipped_files)
        results = ProcessingResults(
            successful_files=successful_files,
            failed_files=failed_files,
            total_files=len(tasks),
            processing_time=processing_time,
            statistics={
                'task_creation': task_stats,
                'success_rate': len(successful_files) / len(tasks) * 100 if tasks else 0,
                'average_file_time': processing_time / len(tasks) if tasks else 0,
                'skipped_files': len(skipped_files),
                'processed_files': len(successful_files) - len(skipped_files),
                'actually_processed': [f for f in successful_files if f not in skipped_set]
            }
        )

        logger.info(f"Processing completed: {len(successful_files)} successful, {len(failed_files)} failed, {len(skipped_files)} skipped")
        self._log_llm_operation("BATCH_COMPLETE", "ALL_FILES", 
                               f"Total: {len(tasks)}, Success: {len(successful_files)}, Failed: {len(failed_files)}, Skipped: {len(skipped_files)}", True)

        return results

    def _execute_tasks_sequential(self, tasks: List[ProcessingTask]) -> Tuple[List[str], List[Tuple[str, str]], List[str]]:
        """Process tasks one at a time; returns (successful, failed, skipped) lists."""
        successful_files: List[str] = []
        failed_files: List[Tuple[str, str]] = []
        skipped_files: List[str] = []

        for i, task in enumerate(tasks):
            logger.info(f"Processing {i+1}/{len(tasks)}: {task.file_path}")
            success, message, skipped = self._process_task_with_status_check(task)

            if skipped:
                # A skipped file still counts toward the success totals.
                skipped_files.append(task.file_path)
                successful_files.append(task.file_path)
            elif success:
                successful_files.append(task.file_path)
            else:
                failed_files.append((task.file_path, message))

        return successful_files, failed_files, skipped_files

    def _execute_tasks_parallel(self, tasks: List[ProcessingTask], max_workers: int) -> Tuple[List[str], List[Tuple[str, str]], List[str]]:
        """Process tasks concurrently in a thread pool; returns (successful, failed, skipped) lists."""
        from concurrent.futures import ThreadPoolExecutor, as_completed

        successful_files: List[str] = []
        failed_files: List[Tuple[str, str]] = []
        skipped_files: List[str] = []

        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Submit all tasks up front; as_completed yields in finish order.
            future_to_task = {executor.submit(self._process_task_with_status_check, task): task 
                              for task in tasks}

            completed = 0
            for future in as_completed(future_to_task):
                task = future_to_task[future]
                completed += 1

                try:
                    success, message, skipped = future.result()
                except Exception as e:
                    # A worker exception is recorded as a failure, not re-raised.
                    failed_files.append((task.file_path, f"Processing exception: {e}"))
                    logger.error(f"[{completed}/{len(tasks)}] Exception processing {task.file_path}: {e}")
                    continue

                if skipped:
                    # A skipped file still counts toward the success totals.
                    skipped_files.append(task.file_path)
                    successful_files.append(task.file_path)
                    logger.info(f"[{completed}/{len(tasks)}] Skipped (already processed): {task.file_path}")
                elif success:
                    successful_files.append(task.file_path)
                    logger.info(f"[{completed}/{len(tasks)}] Successfully processed: {task.file_path}")
                else:
                    failed_files.append((task.file_path, message))
                    logger.error(f"[{completed}/{len(tasks)}] Failed to process: {task.file_path} - {message}")

        return successful_files, failed_files, skipped_files
