#!/usr/bin/env python3
"""
Code Classifier for Fortran Code Commentator

Enhanced code classifier with robust file discovery and mapping.
Classifies Fortran code files into functional categories for targeted commenting.

Originally designed for GSI/EnKF codebase but generalized for any Fortran project.
"""

import json
import logging
import time
from pathlib import Path
from typing import Dict, List, Optional, Any

from ..core.models import GSICodeClassification

logger = logging.getLogger(__name__)




class CodeClassifier:
    """Enhanced code classifier with robust file discovery and mapping.

    Maps Fortran source files into functional categories (background/grid,
    core analysis, I/O interface, observation processing, utilities) using a
    pre-built classification directory when available, with file-name
    pattern heuristics as a fallback.
    """

    # Functional categories; each must exist as a list-of-filenames attribute
    # on GSICodeClassification. Order matters: classify_file checks categories
    # in this order and returns the first match.
    CATEGORIES = [
        "background_grid",
        "core_analysis",
        "io_interface",
        "observation_processing",
        "utilities",
    ]

    # Fortran source extensions (lower/upper case) recognized by both the
    # classification loader and the discovery scan.
    FORTRAN_EXTENSIONS = ["*.f90", "*.F90", "*.f95", "*.F95", "*.f03", "*.F03"]

    def __init__(self, base_path: str):
        """
        Args:
            base_path: Root directory expected to contain a classification
                subdirectory (see _load_classification_map).
        """
        self.base_path = Path(base_path)
        self.classification_stats: Dict[str, Any] = {}
        self.discovery_stats: Dict[str, int] = {}
        self.classification_map = self._load_classification_map()

    def _load_classification_map(self) -> "GSICodeClassification":
        """Load the pre-built classification from disk.

        Looks for one of several conventional directory names under
        ``base_path``; each category is a subdirectory whose Fortran file
        names populate the corresponding attribute of the returned
        GSICodeClassification. Missing directories are logged and skipped, so
        an empty classification is a valid (degraded) result.
        """
        classification = GSICodeClassification()

        # Try different possible classification directory names
        classification_paths = [
            self.base_path / "gsi_classification",
            self.base_path / "classification",
            self.base_path / "code_classification"
        ]
        classification_path = next(
            (p for p in classification_paths if p.exists()), None
        )

        if not classification_path:
            logger.warning(f"Classification directory not found in any of: {[str(p) for p in classification_paths]}")
            return classification

        for category in self.CATEGORIES:
            category_path = classification_path / category
            if not category_path.exists():
                logger.warning(f"Category directory not found: {category_path}")
                continue

            # Discover all Fortran files in this category directory
            fortran_files = []
            for pattern in self.FORTRAN_EXTENSIONS:
                fortran_files.extend(category_path.glob(pattern))

            # dict.fromkeys dedupes while preserving order: on
            # case-insensitive filesystems *.f90 and *.F90 can match the
            # same file twice.
            file_names = list(
                dict.fromkeys(f.name for f in fortran_files if f.is_file())
            )
            setattr(classification, category, file_names)

            # Collect statistics
            self.classification_stats[category] = {
                'file_count': len(file_names),
                'directory_exists': True,
                'sample_files': file_names[:5]  # First 5 files as examples
            }

            logger.info(f"Loaded {len(file_names)} files for category: {category}")
            if file_names:
                logger.debug(f"Sample files in {category}: {file_names[:3]}")

        # Validate classification completeness
        total_files = sum(len(getattr(classification, cat)) for cat in self.CATEGORIES)
        logger.info(f"Total classified files: {total_files}")

        return classification

    def discover_fortran_files(self, search_paths: List[str]) -> Dict[str, List[str]]:
        """Recursively discover Fortran files under each search path.

        Returns:
            Dict with keys 'gsi_classification', 'gsi_main', 'enkf_main'
            (path-substring buckets, first match wins) and 'all_files'
            (every discovered file). Per-bucket counts are also stored in
            ``self.discovery_stats``.
        """
        discovered_files: Dict[str, List[str]] = {
            'gsi_classification': [],
            'gsi_main': [],
            'enkf_main': [],
            'all_files': []
        }

        for search_path_str in search_paths:
            search_path = Path(search_path_str)
            if not search_path.exists():
                logger.warning(f"Search path does not exist: {search_path}")
                continue

            logger.info(f"Discovering Fortran files in: {search_path}")

            # Search for all Fortran files, recursively
            found_files = []
            for pattern in self.FORTRAN_EXTENSIONS:
                found_files.extend(search_path.glob(f"**/{pattern}"))

            # Dedupe (case-insensitive filesystems can match one file under
            # several patterns) while preserving discovery order.
            for file_path in dict.fromkeys(found_files):
                if not file_path.is_file():
                    continue
                str_path = str(file_path)
                discovered_files['all_files'].append(str_path)

                # Bucket by path content; first matching bucket wins, so the
                # '/src/' branch already excludes classification paths.
                if 'classification' in str_path:
                    discovered_files['gsi_classification'].append(str_path)
                elif '/src/' in str_path:
                    discovered_files['gsi_main'].append(str_path)
                elif 'enkf' in str_path:
                    discovered_files['enkf_main'].append(str_path)

        # Log discovery statistics
        for category, files in discovered_files.items():
            logger.info(f"Discovered {len(files)} files in category: {category}")

        self.discovery_stats = {
            category: len(files) for category, files in discovered_files.items()
        }

        return discovered_files

    def classify_file(self, file_path: str) -> str:
        """Classify a single file, returning one of CATEGORIES.

        Tries the pre-loaded classification map first (exact file-name
        match, in CATEGORIES order), then falls back to name-pattern
        heuristics.
        """
        file_name = Path(file_path).name

        # Direct classification mapping
        for category in self.CATEGORIES:
            if file_name in getattr(self.classification_map, category):
                return category

        # Fallback classification based on file name patterns
        return self._classify_by_pattern(file_name, file_path)

    def _classify_by_pattern(self, file_name: str, file_path: str) -> str:
        """Heuristic fallback classification from file-name substrings.

        Pattern groups are checked in priority order (grid, analysis, I/O,
        observation); anything unmatched defaults to 'utilities'. Only the
        file name is inspected; ``file_path`` is kept for interface
        stability.
        """
        file_lower = file_name.lower()

        pattern_groups = [
            ("background_grid", ['grid', 'interp', 'transform', 'coord', 'domain',
                                 'background', 'ensemble', 'berror', 'bkgvar', 'prewgt']),
            ("core_analysis", ['anal', 'cost', 'minim', 'control', 'jfunc', 'grad',
                               'bicg', 'pcg', '4dvar', 'variational']),
            ("io_interface", ['read', 'write', 'io', 'netcdf', 'bufr', 'grib', 'diag',
                              'guess', 'file', 'interface']),
            ("observation_processing", ['obs', 'rad', 'conv', 'gps', 'setup', 'qc',
                                        'thin', 'sat', 'sonde', 'aircr', 'surface']),
        ]
        for category, patterns in pattern_groups:
            if any(pattern in file_lower for pattern in patterns):
                return category

        # Default to utilities
        logger.debug(f"Using default classification 'utilities' for: {file_name}")
        return "utilities"

    def validate_classification_accuracy(self) -> Dict[str, Any]:
        """Validate classification accuracy and completeness.

        Returns:
            Dict with per-category counts, EMPTY/LOW/GOOD quality labels
            (thresholds 0 and 10 files), and human-readable recommendations.
        """
        validation_results: Dict[str, Any] = {
            'total_classified_files': 0,
            'category_distribution': {},
            'classification_quality': {},
            'missing_files': [],
            'recommendations': []
        }

        # Count files in each category
        for category in self.CATEGORIES:
            file_list = getattr(self.classification_map, category)
            validation_results['category_distribution'][category] = len(file_list)
            validation_results['total_classified_files'] += len(file_list)

        # Quality assessment
        for category, stats in self.classification_stats.items():
            if stats['file_count'] == 0:
                validation_results['classification_quality'][category] = 'EMPTY'
                validation_results['recommendations'].append(
                    f"Category {category} has no files - review classification"
                )
            elif stats['file_count'] < 10:
                validation_results['classification_quality'][category] = 'LOW'
                validation_results['recommendations'].append(
                    f"Category {category} has few files ({stats['file_count']}) - verify completeness"
                )
            else:
                validation_results['classification_quality'][category] = 'GOOD'

        return validation_results

    def get_classification_statistics(self) -> Dict[str, Any]:
        """Get comprehensive statistics: load stats, discovery stats, validation."""
        return {
            'classification_stats': self.classification_stats,
            'discovery_stats': self.discovery_stats,
            'validation_results': self.validate_classification_accuracy()
        }

    def create_classification_mapping_report(self, output_file: Optional[str] = None) -> Dict[str, Any]:
        """Create a detailed classification mapping report.

        Args:
            output_file: If given, the report is additionally written to this
                path as JSON (failures are logged, not raised).

        Returns:
            The report dict (timestamp, per-category mapping, quality
            metrics, recommendations).
        """
        report: Dict[str, Any] = {
            'report_timestamp': time.strftime('%Y-%m-%d %H:%M:%S'),
            'classification_summary': self.classification_stats,
            'discovery_summary': self.discovery_stats,
            'validation_results': self.validate_classification_accuracy(),
            'detailed_mapping': {},
            'recommendations': [],
            'quality_metrics': {}
        }

        # Create detailed mapping for each category
        for category in self.CATEGORIES:
            file_list = getattr(self.classification_map, category)
            report['detailed_mapping'][category] = {
                'file_count': len(file_list),
                'files': file_list,
                'sample_files': file_list[:10],  # slicing already caps short lists
                'classification_pattern_analysis': self._analyze_classification_patterns(file_list)
            }

        # Generate quality metrics
        total_files = sum(len(getattr(self.classification_map, cat)) for cat in self.CATEGORIES)
        report['quality_metrics'] = {
            'total_classified_files': total_files,
            'largest_category': max(self.CATEGORIES, key=lambda c: len(getattr(self.classification_map, c))),
            'smallest_category': min(self.CATEGORIES, key=lambda c: len(getattr(self.classification_map, c))),
            'balance_score': self._calculate_balance_score(self.CATEGORIES),
            'coverage_completeness': self._assess_coverage_completeness()
        }

        # Generate recommendations
        report['recommendations'] = self._generate_classification_recommendations(report)

        # Save report if output file specified
        if output_file:
            try:
                with open(output_file, 'w', encoding='utf-8') as f:
                    json.dump(report, f, indent=2, ensure_ascii=False)
                logger.info(f"Classification mapping report saved to: {output_file}")
            except (OSError, TypeError) as e:
                # Filesystem or serialization failure: the in-memory report
                # is still returned to the caller.
                logger.error(f"Failed to save report to {output_file}: {e}")

        return report

    def _analyze_classification_patterns(self, file_list: List[str]) -> Dict[str, Any]:
        """Analyze naming patterns (prefixes, suffixes, name length) in file names."""
        patterns: Dict[str, Any] = {
            'common_prefixes': {},
            'common_suffixes': {},
            'pattern_keywords': {},
            'file_length_distribution': {'short': 0, 'medium': 0, 'long': 0}
        }

        for filename in file_list:
            # Analyze prefixes (first 3 characters)
            prefix = filename[:3].lower()
            patterns['common_prefixes'][prefix] = patterns['common_prefixes'].get(prefix, 0) + 1

            # Path.stem strips whatever extension is present (.f90, .F95,
            # .f03, ...), not just .f90/.F90.
            base_name = Path(filename).stem
            if '_' in base_name:
                suffix = base_name.split('_')[-1].lower()
                patterns['common_suffixes'][suffix] = patterns['common_suffixes'].get(suffix, 0) + 1

            # Analyze filename length (extension excluded)
            if len(base_name) < 10:
                patterns['file_length_distribution']['short'] += 1
            elif len(base_name) < 20:
                patterns['file_length_distribution']['medium'] += 1
            else:
                patterns['file_length_distribution']['long'] += 1

        # Extract top patterns
        patterns['top_prefixes'] = sorted(patterns['common_prefixes'].items(),
                                          key=lambda x: x[1], reverse=True)[:5]
        patterns['top_suffixes'] = sorted(patterns['common_suffixes'].items(),
                                          key=lambda x: x[1], reverse=True)[:5]

        return patterns

    def _calculate_balance_score(self, categories: List[str]) -> float:
        """Return a balance score in [0, 1]; 1.0 means perfectly even.

        The score is 1 minus the mean squared deviation from the ideal
        per-category count, normalized by ideal**2 and clamped at 0
        (highly skewed distributions can exceed the normalizer).
        """
        file_counts = [len(getattr(self.classification_map, cat)) for cat in categories]
        total_files = sum(file_counts)

        if total_files == 0:
            return 0.0

        # Ideal distribution is an equal share per category.
        ideal_per_category = total_files / len(categories)
        mean_squared_deviation = sum(
            (count - ideal_per_category) ** 2 for count in file_counts
        ) / len(file_counts)

        balance_score = 1.0 - (mean_squared_deviation / (ideal_per_category ** 2))
        return max(0.0, balance_score)

    def _assess_coverage_completeness(self) -> Dict[str, Any]:
        """Summarize which categories are empty vs. well populated (>= 10 files)."""
        counts = {cat: len(getattr(self.classification_map, cat)) for cat in self.CATEGORIES}
        return {
            'has_all_categories': all(n > 0 for n in counts.values()),
            'empty_categories': [cat for cat, n in counts.items() if n == 0],
            'well_populated_categories': [cat for cat, n in counts.items() if n >= 10],
        }

    def _generate_classification_recommendations(self, report: Dict[str, Any]) -> List[str]:
        """Generate actionable recommendations from a mapping report."""
        recommendations = []

        # Check balance
        if report['quality_metrics']['balance_score'] < 0.5:
            recommendations.append(
                "Classification distribution is unbalanced. Consider reviewing file assignments to achieve more even distribution."
            )

        # Check empty categories
        empty_cats = report['quality_metrics']['coverage_completeness']['empty_categories']
        if empty_cats:
            recommendations.append(
                f"Empty categories detected: {', '.join(empty_cats)}. Review if these categories are needed or if files are misclassified."
            )

        # Check total coverage
        total_files = report['quality_metrics']['total_classified_files']
        if total_files < 100:
            recommendations.append(
                "Low number of classified files. Consider expanding classification to include more source files."
            )

        # Discovery vs. classification alignment. The report stores the
        # bucket->count mapping under 'discovery_summary' (including the
        # 'all_files' bucket), not 'discovery_stats'.
        discovery = report.get('discovery_summary')
        if discovery:
            discovered_total = discovery.get('all_files', 0)
            if discovered_total > total_files * 2:
                recommendations.append(
                    "Many discovered files are not pre-classified. Consider expanding classification directory structure."
                )

        return recommendations

