"""
Numerical analysis module.
Detects and analyzes numerical patterns, anomalies, and trends in text data.
"""

import math
import re
from collections import defaultdict
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

@dataclass
class NumericalStatistics:
    """Container for numerical analysis statistics.

    Mutable containers use ``field(default_factory=...)`` so every
    instance owns fresh lists instead of a shared ``None`` placeholder
    that callers had to patch after construction.
    """
    total_numbers: int = 0                      # valid + invalid tokens found
    valid_numbers: List[float] = field(default_factory=list)
    invalid_numbers: List[str] = field(default_factory=list)
    scientific_notation_count: int = 0
    large_numbers_count: int = 0
    small_numbers_count: int = 0
    nan_count: int = 0
    inf_count: int = 0
    zero_division_count: int = 0
    # These stay None until an analysis pass populates them.
    number_distribution: Optional[Dict[str, float]] = None
    anomaly_summary: Optional[Dict[str, Any]] = None
    
class NumericalAnalyzer:
    """Analyzes numerical content and detects anomalies in text.

    Workflow (see :meth:`analyze`): extract number-like tokens from the
    text, scan for anomaly markers (NaN/null words, infinity markers,
    zero-division phrases, scientific notation, extreme magnitudes),
    summarize the distribution of the valid numbers, and fold everything
    into a result dict that includes a 0-1 "health score".
    """

    def __init__(self, config):
        """Store *config* and pre-compile the detection regexes.

        Args:
            config: Settings object; must expose the boolean flags
                ``detect_nan``, ``detect_inf``, ``detect_zero_division``,
                ``detect_large_numbers`` and the numeric thresholds
                ``large_number_threshold`` / ``small_number_threshold``.
        """
        self.config = config
        self.stats = NumericalStatistics()
        self.stats.valid_numbers = []
        self.stats.invalid_numbers = []

        # Compile regex patterns once; reused on every analyze() call.
        self._compile_patterns()

    def _compile_patterns(self) -> None:
        """Compile regex patterns for numerical detection."""
        # Scientific notation, e.g. 1.23E+04, -4.56e-02.
        self.scientific_pattern = re.compile(
            r'[-+]?\d*\.?\d+[eE][-+]?\d+',
            re.IGNORECASE
        )

        # General number pattern: integers, decimals, leading-dot
        # fractions, with optional sign and exponent.
        self.number_pattern = re.compile(
            r'[-+]?(?:\d+\.?\d*|\.\d+)(?:[eE][-+]?\d+)?'
        )

        # Textual NaN / null markers.
        self.nan_pattern = re.compile(
            r'\b(?:nan|NaN|NAN|null|NULL|N/A|n/a)\b',
            re.IGNORECASE
        )

        # Textual infinity markers.
        self.inf_pattern = re.compile(
            r'\b(?:inf|infinity|∞|INF|INFINITY)\b',
            re.IGNORECASE
        )

        # Phrases that indicate a division by zero occurred.
        self.zero_div_pattern = re.compile(
            r'(?:div(?:ide)?\s*by\s*zero|zero\s*div(?:ision)?|1/0|÷0)',
            re.IGNORECASE
        )

    def analyze(self, text: str) -> Dict[str, Any]:
        """Perform comprehensive numerical analysis on *text*.

        Returns:
            Dict with ``summary``, ``distribution``, ``anomalies`` and
            ``health_score`` keys (see :meth:`_compile_results`).
        """
        # Reset statistics so repeated analyze() calls on the same
        # instance do not leak counts from a previous text (previously,
        # counters for disabled detectors kept their stale values).
        self.stats = NumericalStatistics()
        self.stats.valid_numbers = []
        self.stats.invalid_numbers = []

        # Extract all numerical patterns (also populates self.stats).
        numbers = self._extract_numbers(text)

        # Detect anomalies; relies on stats.valid_numbers set just above.
        anomalies = self._detect_anomalies(text)

        # Analyze number distribution.
        distribution = self._analyze_number_distribution(numbers)

        # Aggregate counts into the anomaly summary.
        self._calculate_statistics(numbers, anomalies)

        return self._compile_results(distribution, anomalies)

    def _extract_numbers(self, text: str) -> List[float]:
        """Extract and validate numerical values from *text*.

        Side effect: stores valid/invalid tokens and the total count on
        ``self.stats``.

        Returns:
            The list of finite float values found.
        """
        valid_numbers = []
        invalid_numbers = []

        # Find all number-like patterns.
        matches = self.number_pattern.findall(text)

        for match in matches:
            try:
                number = float(match)

                # float() can yield nan/inf (e.g. "1e999" overflows to
                # inf); treat those tokens as invalid.
                if math.isnan(number):
                    invalid_numbers.append(match)
                elif math.isinf(number):
                    invalid_numbers.append(match)
                else:
                    valid_numbers.append(number)

            except (ValueError, OverflowError):
                invalid_numbers.append(match)

        self.stats.valid_numbers = valid_numbers
        self.stats.invalid_numbers = invalid_numbers
        self.stats.total_numbers = len(valid_numbers) + len(invalid_numbers)

        return valid_numbers

    def _detect_anomalies(self, text: str) -> Dict[str, Any]:
        """Detect various types of numerical anomalies in *text*.

        Must run after :meth:`_extract_numbers`, which populates
        ``self.stats.valid_numbers`` used for the magnitude checks.

        Returns:
            Dict mapping anomaly category to the list of occurrences.
        """
        anomalies = {
            'nan_occurrences': [],
            'inf_occurrences': [],
            'zero_division_occurrences': [],
            'large_numbers': [],
            'small_numbers': [],
            'scientific_notation': []
        }

        # Detect NaN values.
        if self.config.detect_nan:
            nan_matches = self.nan_pattern.findall(text)
            anomalies['nan_occurrences'] = nan_matches
            self.stats.nan_count = len(nan_matches)

        # Detect infinity values.
        if self.config.detect_inf:
            inf_matches = self.inf_pattern.findall(text)
            anomalies['inf_occurrences'] = inf_matches
            self.stats.inf_count = len(inf_matches)

        # Detect zero division mentions.
        if self.config.detect_zero_division:
            zero_div_matches = self.zero_div_pattern.findall(text)
            anomalies['zero_division_occurrences'] = zero_div_matches
            self.stats.zero_division_count = len(zero_div_matches)

        # Scientific notation is always counted (not gated by config).
        scientific_matches = self.scientific_pattern.findall(text)
        anomalies['scientific_notation'] = scientific_matches
        self.stats.scientific_notation_count = len(scientific_matches)

        # Detect large and small magnitudes among the valid numbers.
        # NOTE(review): small-number detection is gated on
        # detect_large_numbers (there is no separate small-number flag
        # visible here) — confirm this coupling is intentional.
        if self.config.detect_large_numbers:
            for number in self.stats.valid_numbers:
                if abs(number) > self.config.large_number_threshold:
                    anomalies['large_numbers'].append(number)
                elif 0 < abs(number) < self.config.small_number_threshold:
                    anomalies['small_numbers'].append(number)

        self.stats.large_numbers_count = len(anomalies['large_numbers'])
        self.stats.small_numbers_count = len(anomalies['small_numbers'])

        return anomalies

    def _analyze_number_distribution(self, numbers: List[float]) -> Dict[str, Any]:
        """Analyze the statistical distribution of *numbers*.

        Returns:
            Summary-statistics dict (empty when *numbers* is empty).
        """
        if not numbers:
            return {}

        numbers_array = np.array(numbers)

        # Zeros are excluded from the log-magnitude analysis below.
        non_zero_numbers = numbers_array[numbers_array != 0]

        distribution = {
            'count': len(numbers),
            'mean': float(np.mean(numbers_array)),
            'median': float(np.median(numbers_array)),
            'std': float(np.std(numbers_array)),
            'min': float(np.min(numbers_array)),
            'max': float(np.max(numbers_array)),
            'range': float(np.max(numbers_array) - np.min(numbers_array)),
            'q25': float(np.percentile(numbers_array, 25)),
            'q75': float(np.percentile(numbers_array, 75)),
            'zeros_count': int(np.sum(numbers_array == 0)),
            'positives_count': int(np.sum(numbers_array > 0)),
            'negatives_count': int(np.sum(numbers_array < 0))
        }

        # Add skewness and kurtosis if there is enough data.
        if len(numbers) > 3:
            try:
                # Import inside the try so a missing scipy triggers the
                # manual fallback below (previously the import sat
                # outside the try and the ImportError escaped uncaught).
                from scipy import stats
                distribution['skewness'] = float(stats.skew(numbers_array))
                distribution['kurtosis'] = float(stats.kurtosis(numbers_array))
            except ImportError:
                # Fallback: moment-based estimators (excess kurtosis).
                mean = distribution['mean']
                std = distribution['std']
                if std > 0:
                    skewness = np.mean(((numbers_array - mean) / std) ** 3)
                    kurtosis = np.mean(((numbers_array - mean) / std) ** 4) - 3
                    distribution['skewness'] = float(skewness)
                    distribution['kurtosis'] = float(kurtosis)

        # Order-of-magnitude analysis over the non-zero values.
        if len(non_zero_numbers) > 0:
            log_magnitudes = np.log10(np.abs(non_zero_numbers))
            distribution['magnitude_analysis'] = {
                'mean_log_magnitude': float(np.mean(log_magnitudes)),
                'std_log_magnitude': float(np.std(log_magnitudes)),
                'magnitude_range': float(np.max(log_magnitudes) - np.min(log_magnitudes))
            }

        return distribution

    def _calculate_statistics(self, numbers: List[float], anomalies: Dict[str, Any]) -> None:
        """Build the anomaly summary on ``self.stats``.

        The *numbers* and *anomalies* arguments are currently unused
        (everything needed is already on ``self.stats``); the signature
        is kept for call-site stability.
        """
        # Scientific notation is reported in anomaly_types but is
        # deliberately excluded from total_anomalies — it is notation,
        # not a defect.
        self.stats.anomaly_summary = {
            'total_anomalies': (
                self.stats.nan_count +
                self.stats.inf_count +
                self.stats.zero_division_count +
                self.stats.large_numbers_count +
                self.stats.small_numbers_count
            ),
            'anomaly_rate': 0.0,
            'anomaly_types': {
                'nan': self.stats.nan_count,
                'inf': self.stats.inf_count,
                'zero_division': self.stats.zero_division_count,
                'large_numbers': self.stats.large_numbers_count,
                'small_numbers': self.stats.small_numbers_count,
                'scientific_notation': self.stats.scientific_notation_count
            }
        }

        # Anomaly rate is anomalies per number found (guard against /0).
        if self.stats.total_numbers > 0:
            self.stats.anomaly_summary['anomaly_rate'] = (
                self.stats.anomaly_summary['total_anomalies'] / self.stats.total_numbers
            )

    def extract_sequences(self, text: str, sequence_patterns: Optional[List[str]] = None) -> Dict[str, Dict[str, Any]]:
        """Extract numerical sequences based on patterns (e.g., iteration data).

        Args:
            text: Text (typically a computational log) to scan.
            sequence_patterns: Optional custom regex patterns; when
                omitted, a built-in set of solver/log patterns is used.

        Returns:
            Mapping of sequence name to a dict with ``values``,
            ``description``, ``keywords``, ``pattern_used`` and
            ``match_count`` keys.  (The previous ``Dict[str, List[float]]``
            annotation did not match the actual nested-dict return.)
        """
        sequences = {}

        # Enhanced patterns with meaningful names and descriptions.
        if sequence_patterns is None:
            pattern_definitions = [
                {
                    'name': 'iteration_norms',
                    'description': 'Norm values from iterative solver iterations',
                    'pattern': r'(?i)iteration\s+(\d+).*?norm\s*=\s*([\d\.eE+-]+)',
                    'keywords': ['iteration', 'norm']
                },
                {
                    'name': 'step_residuals',
                    'description': 'Residual values from computational steps',
                    'pattern': r'(?i)step\s*=\s*(\d+).*?(?:norm|residual)\s*[=:]\s*([\d\.eE+-]+)',
                    'keywords': ['step', 'residual', 'norm']
                },
                {
                    'name': 'residual_sequence',
                    'description': 'General residual value sequence',
                    'pattern': r'(?i)residual.*?([\d\.eE+-]+)',
                    'keywords': ['residual']
                },
                {
                    'name': 'alpha_parameters',
                    'description': 'Alpha parameter values (step sizes)',
                    'pattern': r'(?i)alpha\s*[=:]\s*([\d\.eE+-]+)',
                    'keywords': ['alpha', 'parameter']
                },
                {
                    'name': 'convergence_rates',
                    'description': 'Convergence rate measurements',
                    'pattern': r'(?i)convergence.*?([\d\.eE+-]+)',
                    'keywords': ['convergence', 'rate']
                },
                {
                    'name': 'error_values',
                    'description': 'Error measurements and tolerances',
                    'pattern': r'(?i)error\s*[=:]\s*([\d\.eE+-]+)',
                    'keywords': ['error']
                },
                {
                    'name': 'tolerance_values',
                    'description': 'Tolerance and threshold values',
                    'pattern': r'(?i)(?:tolerance|tol)\s*[=:]\s*([\d\.eE+-]+)',
                    'keywords': ['tolerance', 'threshold']
                }
            ]
        else:
            # Custom patterns get generic sequential names.
            pattern_definitions = [
                {
                    'name': f'custom_sequence_{i+1}',
                    'description': f'Custom pattern {i+1} extracted sequence',
                    'pattern': pattern,
                    'keywords': ['custom']
                }
                for i, pattern in enumerate(sequence_patterns)
            ]

        # Extract sequences using the selected patterns.
        for pattern_def in pattern_definitions:
            pattern = pattern_def['pattern']
            sequence_name = pattern_def['name']

            matches = re.findall(pattern, text, re.MULTILINE)

            if matches:
                try:
                    # Patterns with several groups yield tuples; the
                    # numerical payload is the last captured group.
                    if isinstance(matches[0], tuple):
                        numbers = [float(match[-1]) for match in matches if self._is_valid_number(match[-1])]
                    else:
                        numbers = [float(match) for match in matches if self._is_valid_number(match)]

                    if numbers:
                        sequences[sequence_name] = {
                            'values': numbers,
                            'description': pattern_def['description'],
                            'keywords': pattern_def['keywords'],
                            'pattern_used': pattern,
                            'match_count': len(matches)
                        }
                except (ValueError, IndexError):
                    # Skip patterns whose captures are not parseable.
                    continue

        # Special handling for common computational log patterns.
        special_patterns = [
            {
                'name': 'solver_residual_norms',
                'description': 'Residual norm values from linear solver iterations',
                'pattern': r'residual norm\s*=\s*([\d\.eE+-]+)',
                'keywords': ['solver', 'residual', 'norm']
            },
            {
                'name': 'relative_errors',
                'description': 'Relative error measurements during iterations',
                'pattern': r'relative_error\s*=\s*([\d\.eE+-]+)',
                'keywords': ['relative', 'error']
            },
            {
                'name': 'performance_metrics',
                'description': 'Performance measurements (GFLOPS, memory, etc.)',
                'pattern': r'(?i)(?:gflops|memory|performance).*?([\d\.eE+-]+)',
                'keywords': ['performance', 'metrics']
            }
        ]

        for special_def in special_patterns:
            matches = re.findall(special_def['pattern'], text, re.IGNORECASE)
            if matches:
                try:
                    numbers = [float(match) for match in matches if self._is_valid_number(match)]
                    if numbers:
                        sequences[special_def['name']] = {
                            'values': numbers,
                            'description': special_def['description'],
                            'keywords': special_def['keywords'],
                            'pattern_used': special_def['pattern'],
                            'match_count': len(matches)
                        }
                except ValueError:
                    pass

        return sequences

    def _is_valid_number(self, value: str) -> bool:
        """Return True when *value* parses to a finite float."""
        try:
            num = float(value)
            return not (math.isnan(num) or math.isinf(num))
        except (ValueError, TypeError):
            return False

    def _compile_results(self, distribution: Dict[str, Any], anomalies: Dict[str, Any]) -> Dict[str, Any]:
        """Compile the numerical analysis results into one dict."""
        return {
            'summary': {
                'total_numbers_found': self.stats.total_numbers,
                'valid_numbers': len(self.stats.valid_numbers),
                'invalid_numbers': len(self.stats.invalid_numbers),
                'scientific_notation_count': self.stats.scientific_notation_count
            },
            'distribution': distribution,
            'anomalies': {
                'summary': self.stats.anomaly_summary,
                'details': anomalies
            },
            'health_score': self._calculate_health_score()
        }

    def _calculate_health_score(self) -> float:
        """Calculate a numerical health score (0-1, higher is better)."""
        if self.stats.total_numbers == 0:
            return 1.0  # No numbers, no problems.

        # Guard: if the anomaly summary was never built (method called
        # outside the analyze() flow), report a perfect score rather
        # than crashing on a None subscript.
        if not self.stats.anomaly_summary:
            return 1.0

        # Base score starts at 1.0 and is reduced per anomaly type.
        health_score = 1.0

        # Penalty weights; zero-division is weighted most severely.
        penalties = {
            'nan': 0.3,
            'inf': 0.3,
            'zero_division': 0.5,
            'large_numbers': 0.1,
            'small_numbers': 0.05
        }

        # Each penalty scales with that anomaly's rate per number found.
        for anomaly_type, penalty in penalties.items():
            count = self.stats.anomaly_summary['anomaly_types'].get(anomaly_type, 0)
            if count > 0:
                rate = count / self.stats.total_numbers
                health_score -= penalty * rate

        return max(0.0, health_score)
