#!/usr/bin/env python3
"""
File Processing Module for Fortran Code Commentary System

This module provides file processing operations for Fortran source code,
including filtering, validation, content reading/writing, and statistics tracking.
Designed to work with automated code commentary systems that use LLM models
to generate structured comments compatible with documentation generators.

Features:
- Robust file filtering with size and type validation
- Comprehensive file statistics tracking
- Error-resistant file operations with proper encoding handling
- Support for Fortran file extensions (.f90, .f95, .f03, .f08)
- Backup creation for safe file modifications
- Line counting and file information extraction

Author: Fortran Commentator Development Team
Date: 2025-08-12
"""

import logging
import os
import shutil
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

# Get logger for this module
logger = logging.getLogger(__name__)


class FileProcessor:
    """
    Enhanced file processing operations for Fortran source code with robust
    filtering and validation.

    This class provides comprehensive file processing capabilities including:
    - File discovery and filtering based on size and type
    - Fortran file validation and line counting
    - Cumulative file statistics tracking and reporting
    - Safe file reading and writing operations with backup support
    - Detailed error handling and logging

    Attributes:
        max_lines (int): Maximum number of lines for a file to be considered
            processable.
        file_stats (dict): Cumulative statistics, updated by every call to
            ``filter_files_by_size``: ``total_discovered``,
            ``filtered_by_size``, ``filtered_by_extension``,
            ``processing_eligible``, and ``file_type_distribution``
            (extension -> count).
    """

    # Extensions recognized as free-form Fortran source files.
    FORTRAN_EXTENSIONS: Tuple[str, ...] = ('.f90', '.f95', '.f03', '.f08')

    def __init__(self, max_lines: int = 1500):
        """
        Initialize the FileProcessor with configurable parameters.

        Args:
            max_lines (int): Maximum number of lines for a file to be
                considered processable. Files exceeding this limit will be
                skipped during filtering.
        """
        self.max_lines = max_lines
        # Per-instance logger keeps the class self-contained and testable.
        self._logger = logging.getLogger(__name__)
        # Cumulative statistics; folded into by filter_files_by_size().
        self.file_stats: Dict[str, Any] = {
            'total_discovered': 0,
            'filtered_by_size': 0,
            'filtered_by_extension': 0,
            'processing_eligible': 0,
            'file_type_distribution': {}
        }

    def count_lines(self, file_path: str) -> int:
        """
        Count the total number of lines in a file with robust error handling.

        Args:
            file_path (str): Path to the file to count lines in

        Returns:
            int: Number of lines in the file, or 0 if the file cannot be read
        """
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                # Stream line-by-line; never loads the whole file into memory.
                return sum(1 for _ in f)
        except FileNotFoundError:
            self._logger.warning("File not found: %s", file_path)
            return 0
        except PermissionError:
            self._logger.warning("Permission denied reading: %s", file_path)
            return 0
        except (OSError, ValueError) as e:
            # ValueError covers pathological paths (e.g. embedded NUL bytes).
            self._logger.error("Error counting lines in %s: %s", file_path, e)
            return 0

    def get_file_info(self, file_path: str) -> Dict[str, Any]:
        """
        Extract comprehensive information about a file.

        Args:
            file_path (str): Path to the file to analyze

        Returns:
            Dict[str, Any]: Dictionary containing file metadata including:
                - path: Full file path
                - name: File name only
                - size_bytes: File size in bytes
                - line_count: Number of lines (Fortran files only)
                - extension: File extension in lowercase
                - is_fortran: Boolean indicating if file is a Fortran source file
                - is_processable: Boolean indicating if file meets processing criteria
                - last_modified: File modification timestamp (epoch seconds)
        """
        path = Path(file_path)
        file_info: Dict[str, Any] = {
            'path': file_path,
            'name': path.name,
            'size_bytes': 0,
            'line_count': 0,
            'extension': path.suffix.lower(),
            'is_fortran': False,
            'is_processable': False,
            'last_modified': None
        }

        try:
            stat = os.stat(file_path)
            file_info['size_bytes'] = stat.st_size
            file_info['last_modified'] = stat.st_mtime

            file_info['is_fortran'] = file_info['extension'] in self.FORTRAN_EXTENSIONS

            # Lines are only counted for Fortran files: counting is the
            # expensive step and non-Fortran files are never processed.
            if file_info['is_fortran']:
                file_info['line_count'] = self.count_lines(file_path)
                file_info['is_processable'] = (
                    0 < file_info['line_count'] <= self.max_lines
                )

        except (OSError, ValueError) as e:
            self._logger.error("Error getting file info for %s: %s", file_path, e)

        return file_info

    def filter_files_by_size(self, files: List[str]) -> Tuple[List[str], Dict[str, Any]]:
        """
        Filter a list of files based on size and type criteria with detailed statistics.

        Side effect: folds this run's results into the cumulative
        ``self.file_stats`` (previously that attribute was never updated).

        Args:
            files (List[str]): List of file paths to filter

        Returns:
            Tuple[List[str], Dict[str, Any]]: A tuple containing:
                - List of processable file paths that passed all filters
                - Dictionary with detailed filtering statistics including:
                    - total_input: Number of input files
                    - fortran_files: Number of Fortran files found
                    - oversized_files: Number of files exceeding max_lines
                    - empty_files: Number of empty files
                    - processable_files: Number of files suitable for processing
                    - size_distribution: Distribution of files by size category
                    - skipped_files: List of (path, reason) tuples for skipped files
        """
        filtered_files: List[str] = []
        filter_stats: Dict[str, Any] = {
            'total_input': len(files),
            'fortran_files': 0,
            'oversized_files': 0,
            'empty_files': 0,
            'processable_files': 0,
            'size_distribution': {'small': 0, 'medium': 0, 'large': 0, 'oversized': 0},
            'skipped_files': []
        }

        type_distribution = self.file_stats['file_type_distribution']

        for file_path in files:
            file_info = self.get_file_info(file_path)

            # Track extension distribution for every input file.
            ext = file_info['extension'] or '<none>'
            type_distribution[ext] = type_distribution.get(ext, 0) + 1

            if not file_info['is_fortran']:
                # Fix: non-Fortran files were previously dropped silently.
                filter_stats['skipped_files'].append(
                    (file_path, f"Not a Fortran file: {ext}")
                )
                self.file_stats['filtered_by_extension'] += 1
                continue

            filter_stats['fortran_files'] += 1
            line_count = file_info['line_count']

            if line_count == 0:
                filter_stats['empty_files'] += 1
                filter_stats['skipped_files'].append((file_path, f"Empty file: {line_count} lines"))
                continue  # empty files are never processable

            # Fix: check the configurable limit first, so oversized files are
            # counted correctly even when max_lines is below the fixed
            # small/medium bucket thresholds.
            if line_count > self.max_lines:
                filter_stats['size_distribution']['oversized'] += 1
                filter_stats['oversized_files'] += 1
                filter_stats['skipped_files'].append((file_path, f"Oversized file: {line_count} lines"))
                self.file_stats['filtered_by_size'] += 1
                continue

            # Categorize accepted files by size.
            if line_count <= 500:
                filter_stats['size_distribution']['small'] += 1
            elif line_count <= 1000:
                filter_stats['size_distribution']['medium'] += 1
            else:
                filter_stats['size_distribution']['large'] += 1

            if file_info['is_processable']:
                filtered_files.append(file_path)
                filter_stats['processable_files'] += 1

        # Fold this run into the cumulative statistics.
        self.file_stats['total_discovered'] += filter_stats['total_input']
        self.file_stats['processing_eligible'] += filter_stats['processable_files']

        # Log filtering results (lazy %-style args avoid eager formatting).
        self._logger.info("File filtering completed:")
        self._logger.info("  Total input files: %d", filter_stats['total_input'])
        self._logger.info("  Fortran files found: %d", filter_stats['fortran_files'])
        self._logger.info("  Processable files: %d", filter_stats['processable_files'])
        self._logger.info("  Oversized files (>%d lines): %d",
                          self.max_lines, filter_stats['oversized_files'])
        self._logger.info("  Empty files: %d", filter_stats['empty_files'])
        self._logger.info("  Size distribution: %s", filter_stats['size_distribution'])

        return filtered_files, filter_stats

    def read_file_content(self, file_path: str) -> Optional[str]:
        """
        Read the complete content of a file with error handling.

        Args:
            file_path (str): Path to the file to read

        Returns:
            Optional[str]: File content as string, or None if reading failed
        """
        try:
            with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
                return f.read()
        except (OSError, ValueError) as e:
            self._logger.error("Error reading %s: %s", file_path, e)
            return None

    def write_commented_file(self, file_path: str, content: str) -> bool:
        """
        Write content to a file with automatic backup creation.

        If the target file already exists and no backup is present yet, a
        backup copy with a '.backup' suffix is created first, preserving the
        pre-commentary content across repeated writes.

        Args:
            file_path (str): Path to the file to write
            content (str): Content to write to the file

        Returns:
            bool: True if write operation succeeded, False otherwise
        """
        try:
            backup_path = f"{file_path}.backup"
            # Fix: only back up files that exist — previously writing a brand
            # new file failed because the backup read raised FileNotFoundError.
            # shutil.copy2 streams the copy and preserves file metadata.
            if os.path.exists(file_path) and not os.path.exists(backup_path):
                shutil.copy2(file_path, backup_path)

            with open(file_path, 'w', encoding='utf-8') as f:
                f.write(content)
            return True

        except (OSError, ValueError) as e:
            self._logger.error("Error writing commented file %s: %s", file_path, e)
            return False

