"""
Trend analysis module.
Analyzes numerical sequences for convergence, divergence, and trend patterns.
"""

import math
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple

import numpy as np

class TrendType(Enum):
    """Enumeration of different trend types.

    The string values are what appears in the analysis result dicts.
    """
    CONVERGENCE = "convergence"              # values trending toward a limit (shrinking |x|)
    DIVERGENCE = "divergence"                # mean change growing without accelerating
    STAGNATION = "stagnation"                # near-zero variance in the step-to-step differences
    OSCILLATION = "oscillation"              # frequent sign flips in the differences (>50%)
    ACCELERATION = "acceleration"            # trend reinforced by its second difference
    DECELERATION = "deceleration"            # NOTE(review): not produced by TrendAnalyzer._classify_trend
    INSUFFICIENT_DATA = "insufficient_data"  # sequence shorter than config.min_sequence_length
    MIXED = "mixed"                          # no single clear pattern detected

class ConvergenceRate(Enum):
    """Convergence rate classifications.

    Based on the median ratio of successive absolute sequence values;
    the thresholds below are applied in TrendAnalyzer._analyze_convergence.
    """
    SUPERLINEAR = "superlinear"  # ratio < 0.1
    LINEAR_FAST = "linear_fast"  # ratio < 0.5
    LINEAR_SLOW = "linear_slow"  # ratio < 0.9
    SUBLINEAR = "sublinear"      # ratio < 1.0
    DIVERGING = "diverging"      # ratio >= 1.0

@dataclass
class TrendStatistics:
    """Container for trend analysis statistics.

    Populated by TrendAnalyzer.analyze_sequence; each field mirrors one of
    the analyzer's sub-analyses.
    """
    # Overall classification; defaults to "not enough data".
    trend_type: TrendType = TrendType.INSUFFICIENT_DATA
    # Convergence-rate class, or None when no rate could be estimated.
    convergence_rate: Optional[ConvergenceRate] = None
    # Mean first difference of the sequence.
    mean_change: float = 0.0
    # Mean second difference (change of the change).
    acceleration: float = 0.0
    # 0-1 score; higher means less relative variation.
    stability_score: float = 0.0
    # Median ratio of successive absolute values, or None if unavailable.
    convergence_ratio: Optional[float] = None
    # BUG FIX: these list fields were declared with `= None`, contradicting
    # their annotations and forcing callers to initialize them by hand.
    # default_factory gives each instance its own fresh list (truthiness
    # checks like `if stats.stagnation_periods:` behave the same for []).
    stagnation_periods: List[Tuple[int, int]] = field(default_factory=list)
    trend_changes: List[Dict[str, Any]] = field(default_factory=list)
    # 0-1 confidence in the overall analysis.
    confidence_score: float = 0.0

class TrendAnalyzer:
    """Analyzes trends in numerical sequences."""
    
    def __init__(self, config):
        self.config = config
        
    def analyze_sequence(self, sequence: List[float], sequence_name: str = "unnamed") -> Dict[str, Any]:
        """Analyze trends in a numerical sequence."""
        
        if len(sequence) < self.config.min_sequence_length:
            return {
                'sequence_name': sequence_name,
                'trend_type': TrendType.INSUFFICIENT_DATA.value,
                'message': f'Insufficient data points: {len(sequence)} < {self.config.min_sequence_length}'
            }
        
        stats = TrendStatistics()
        stats.stagnation_periods = []
        stats.trend_changes = []
        
        # Convert to numpy array for analysis
        data = np.array(sequence)
        
        # Basic trend analysis
        trend_info = self._analyze_basic_trend(data)
        stats.trend_type = trend_info['type']
        stats.mean_change = trend_info['mean_change']
        stats.acceleration = trend_info['acceleration']
        
        # Convergence analysis
        convergence_info = self._analyze_convergence(data)
        stats.convergence_rate = convergence_info['rate']
        stats.convergence_ratio = convergence_info['ratio']
        
        # Stability analysis
        stats.stability_score = self._calculate_stability_score(data)
        
        # Stagnation detection
        stats.stagnation_periods = self._detect_stagnation_periods(data)
        
        # Trend change detection
        stats.trend_changes = self._detect_trend_changes(data)
        
        # Confidence score
        stats.confidence_score = self._calculate_confidence_score(data, stats)
        
        return self._compile_trend_results(stats, sequence_name, data)
    
    def analyze_multiple_sequences(self, sequences: Dict[str, Any]) -> Dict[str, Any]:
        """Analyze trends in multiple numerical sequences."""
        results = {}
        sequence_values = {}  # For comparative analysis
        
        for name, sequence_info in sequences.items():
            # Handle both old format (list) and new format (dict with metadata)
            if isinstance(sequence_info, dict) and 'values' in sequence_info:
                sequence_data = sequence_info['values']
                sequence_description = sequence_info.get('description', 'Numerical sequence')
                sequence_keywords = sequence_info.get('keywords', [])
                pattern_used = sequence_info.get('pattern_used', 'N/A')
                match_count = sequence_info.get('match_count', len(sequence_data))
            else:
                # Fallback for old format (pure list)
                sequence_data = sequence_info
                sequence_description = 'Numerical sequence'
                sequence_keywords = []
                pattern_used = 'N/A'
                match_count = len(sequence_data) if sequence_data else 0
            
            # Analyze the sequence
            analysis_result = self.analyze_sequence(sequence_data, name)
            
            # Add metadata to results
            analysis_result['sequence_metadata'] = {
                'description': sequence_description,
                'keywords': sequence_keywords,
                'pattern_used': pattern_used,
                'match_count': match_count,
                'sequence_type': self._categorize_sequence_type(sequence_keywords, name)
            }
            
            results[name] = analysis_result
            sequence_values[name] = sequence_data  # For comparative analysis
        
        # Add comparative analysis
        if len(sequence_values) > 1:
            results['comparative_analysis'] = self._compare_sequences(sequence_values)
        
        return results
    
    def _categorize_sequence_type(self, keywords: List[str], name: str) -> str:
        """Categorize the type of sequence based on keywords and name."""
        name_lower = name.lower()
        keywords_lower = [kw.lower() for kw in keywords]
        
        # Check for specific sequence types
        if any(kw in ['residual', 'norm'] for kw in keywords_lower) or 'residual' in name_lower or 'norm' in name_lower:
            return 'convergence_metric'
        elif any(kw in ['error', 'tolerance'] for kw in keywords_lower) or 'error' in name_lower:
            return 'error_metric'
        elif any(kw in ['alpha', 'parameter', 'step'] for kw in keywords_lower) or 'alpha' in name_lower or 'step' in name_lower:
            return 'algorithm_parameter'
        elif any(kw in ['performance', 'gflops', 'memory'] for kw in keywords_lower) or 'performance' in name_lower:
            return 'performance_metric'
        elif any(kw in ['iteration', 'solver'] for kw in keywords_lower) or 'iteration' in name_lower:
            return 'iteration_progress'
        elif 'convergence' in keywords_lower or 'convergence' in name_lower:
            return 'convergence_rate'
        else:
            return 'general_numeric'
    
    def _analyze_basic_trend(self, data: np.ndarray) -> Dict[str, Any]:
        """Analyze basic trend characteristics."""
        # Calculate differences
        first_diff = np.diff(data)
        second_diff = np.diff(first_diff) if len(first_diff) > 1 else np.array([])
        
        # Mean change and acceleration
        mean_change = np.mean(first_diff)
        acceleration = np.mean(second_diff) if len(second_diff) > 0 else 0.0
        
        # Determine trend type
        trend_type = self._classify_trend(data, first_diff, second_diff, mean_change, acceleration)
        
        return {
            'type': trend_type,
            'mean_change': mean_change,
            'acceleration': acceleration,
            'first_diff': first_diff,
            'second_diff': second_diff
        }
    
    def _classify_trend(self, data: np.ndarray, first_diff: np.ndarray, 
                       second_diff: np.ndarray, mean_change: float, acceleration: float) -> TrendType:
        """Classify the type of trend in the data."""
        
        # Check for stagnation first
        variance = np.var(first_diff)
        if variance < self.config.stagnation_variance_threshold:
            return TrendType.STAGNATION
        
        # Check for oscillation
        sign_changes = np.sum(np.diff(np.sign(first_diff)) != 0)
        oscillation_ratio = sign_changes / len(first_diff) if len(first_diff) > 0 else 0
        if oscillation_ratio > 0.5:  # More than 50% sign changes
            return TrendType.OSCILLATION
        
        # Determine main trend direction
        abs_mean_change = abs(mean_change)
        data_scale = np.std(data) if np.std(data) > 0 else 1.0
        relative_change = abs_mean_change / data_scale
        
        # Check for convergence (decreasing trend in absolute values)
        if len(data) > 2:
            abs_data = np.abs(data)
            abs_trend = np.mean(np.diff(abs_data))
            
            if abs_trend < -relative_change * 0.1:  # Decreasing absolute values
                if acceleration > 0:  # Accelerating convergence
                    return TrendType.CONVERGENCE
                else:
                    return TrendType.CONVERGENCE
        
        # Check for divergence
        if mean_change > relative_change * 0.1:
            if acceleration > 0:
                return TrendType.ACCELERATION
            else:
                return TrendType.DIVERGENCE
        elif mean_change < -relative_change * 0.1:
            if acceleration < 0:
                return TrendType.ACCELERATION
            else:
                return TrendType.CONVERGENCE
        
        # Default to mixed if unclear
        return TrendType.MIXED
    
    def _analyze_convergence(self, data: np.ndarray) -> Dict[str, Any]:
        """Analyze convergence characteristics of the sequence."""
        
        if len(data) < self.config.convergence_rate_window:
            return {'rate': None, 'ratio': None}
        
        # Calculate convergence ratio using residual-like analysis
        # Use the last portion of data for convergence rate estimation
        window_size = min(self.config.convergence_rate_window, len(data) // 2)
        recent_data = data[-window_size:]
        
        if len(recent_data) < 3:
            return {'rate': None, 'ratio': None}
        
        # Calculate successive ratios
        ratios = []
        for i in range(1, len(recent_data)):
            if abs(recent_data[i-1]) > 1e-15:  # Avoid division by very small numbers
                ratio = abs(recent_data[i]) / abs(recent_data[i-1])
                if ratio < 10:  # Filter out extreme ratios
                    ratios.append(ratio)
        
        if not ratios:
            return {'rate': None, 'ratio': None}
        
        # Use median ratio for robustness
        convergence_ratio = np.median(ratios)
        
        # Classify convergence rate
        if convergence_ratio < 0.1:
            rate = ConvergenceRate.SUPERLINEAR
        elif convergence_ratio < 0.5:
            rate = ConvergenceRate.LINEAR_FAST
        elif convergence_ratio < 0.9:
            rate = ConvergenceRate.LINEAR_SLOW
        elif convergence_ratio < 1.0:
            rate = ConvergenceRate.SUBLINEAR
        else:
            rate = ConvergenceRate.DIVERGING
        
        return {'rate': rate, 'ratio': convergence_ratio}
    
    def _calculate_stability_score(self, data: np.ndarray) -> float:
        """Calculate a stability score for the sequence (0-1, higher is more stable)."""
        
        if len(data) < 2:
            return 0.0
        
        # Calculate relative variance
        mean_val = np.mean(np.abs(data))
        if mean_val == 0:
            return 1.0 if np.all(data == 0) else 0.0
        
        relative_std = np.std(data) / mean_val
        
        # Convert to stability score (inverse of relative variation)
        stability_score = 1.0 / (1.0 + relative_std)
        
        return stability_score
    
    def _detect_stagnation_periods(self, data: np.ndarray) -> List[Tuple[int, int]]:
        """Detect periods where the sequence stagnates."""
        
        if len(data) < 3:
            return []
        
        stagnation_periods = []
        window_size = max(3, self.config.trend_smoothing_window)
        
        i = 0
        while i <= len(data) - window_size:
            window = data[i:i + window_size]
            window_variance = np.var(window)
            
            if window_variance < self.config.stagnation_variance_threshold:
                # Found start of stagnation period
                start_idx = i
                
                # Extend the period as long as variance remains low
                end_idx = i + window_size
                while end_idx < len(data):
                    extended_window = data[start_idx:end_idx + 1]
                    if np.var(extended_window) < self.config.stagnation_variance_threshold:
                        end_idx += 1
                    else:
                        break
                
                stagnation_periods.append((start_idx, end_idx - 1))
                i = end_idx
            else:
                i += 1
        
        return stagnation_periods
    
    def _detect_trend_changes(self, data: np.ndarray) -> List[Dict[str, Any]]:
        """Detect significant changes in trend direction."""
        
        if len(data) < 6:  # Need minimum data for trend change detection
            return []
        
        trend_changes = []
        window_size = max(3, self.config.trend_smoothing_window)
        
        # Smooth the data using moving average
        smoothed_data = self._smooth_data(data, window_size)
        
        # Calculate first and second derivatives
        first_deriv = np.diff(smoothed_data)
        second_deriv = np.diff(first_deriv)
        
        # Detect sign changes in second derivative (trend changes)
        for i in range(1, len(second_deriv)):
            if (second_deriv[i-1] * second_deriv[i] < 0 and 
                abs(second_deriv[i] - second_deriv[i-1]) > np.std(second_deriv) * 0.5):
                
                trend_changes.append({
                    'position': i + window_size,  # Adjust for smoothing offset
                    'type': 'acceleration_change',
                    'magnitude': abs(second_deriv[i] - second_deriv[i-1]),
                    'value': data[min(i + window_size, len(data) - 1)]
                })
        
        return trend_changes
    
    def _smooth_data(self, data: np.ndarray, window_size: int) -> np.ndarray:
        """Apply moving average smoothing to data."""
        if window_size >= len(data):
            return data
        
        smoothed = np.convolve(data, np.ones(window_size)/window_size, mode='valid')
        return smoothed
    
    def _calculate_confidence_score(self, data: np.ndarray, stats: TrendStatistics) -> float:
        """Calculate confidence score for the trend analysis."""
        
        confidence = 0.0
        
        # Data length factor
        length_factor = min(1.0, len(data) / (self.config.min_sequence_length * 3))
        confidence += length_factor * 0.3
        
        # Stability factor
        confidence += stats.stability_score * 0.3
        
        # Trend consistency factor
        if stats.trend_type != TrendType.MIXED:
            confidence += 0.2
        
        # Convergence analysis factor
        if stats.convergence_rate is not None:
            confidence += 0.2
        
        return min(1.0, confidence)
    
    def _compare_sequences(self, sequences: Dict[str, List[float]]) -> Dict[str, Any]:
        """Compare trends across multiple sequences."""
        
        comparison = {
            'sequence_count': len(sequences),
            'length_statistics': {},
            'trend_distribution': {},
            'convergence_comparison': {},
            'correlation_analysis': {}
        }
        
        # Length statistics
        lengths = [len(seq) for seq in sequences.values()]
        comparison['length_statistics'] = {
            'min_length': min(lengths),
            'max_length': max(lengths),
            'mean_length': np.mean(lengths),
            'std_length': np.std(lengths)
        }
        
        # Trend type distribution
        trend_counts = {}
        convergence_rates = []
        
        for name, sequence in sequences.items():
            if len(sequence) >= self.config.min_sequence_length:
                trend_result = self.analyze_sequence(sequence, name)
                trend_type = trend_result.get('trend_type', 'unknown')
                trend_counts[trend_type] = trend_counts.get(trend_type, 0) + 1
                
                conv_ratio = trend_result.get('convergence_ratio')
                if conv_ratio is not None:
                    convergence_rates.append(conv_ratio)
        
        comparison['trend_distribution'] = trend_counts
        
        if convergence_rates:
            comparison['convergence_comparison'] = {
                'mean_convergence_ratio': np.mean(convergence_rates),
                'std_convergence_ratio': np.std(convergence_rates),
                'best_convergence': min(convergence_rates),
                'worst_convergence': max(convergence_rates)
            }
        
        return comparison
    
    def _compile_trend_results(self, stats: TrendStatistics, sequence_name: str, data: np.ndarray) -> Dict[str, Any]:
        """Compile trend analysis results into a structured format."""
        
        result = {
            'sequence_name': sequence_name,
            'data_points': len(data),
            'trend_type': stats.trend_type.value,
            'trend_metrics': {
                'mean_change': round(stats.mean_change, 6),
                'acceleration': round(stats.acceleration, 6),
                'stability_score': round(stats.stability_score, 4),
                'confidence_score': round(stats.confidence_score, 4)
            },
            'data_range': {
                'min': float(np.min(data)),
                'max': float(np.max(data)),
                'first': float(data[0]),
                'last': float(data[-1]),
                'change': float(data[-1] - data[0])
            }
        }
        
        # Add convergence information if available
        if stats.convergence_rate is not None:
            result['convergence_analysis'] = {
                'convergence_rate': stats.convergence_rate.value,
                'convergence_ratio': round(stats.convergence_ratio, 6)
            }
        
        # Add stagnation information if found
        if stats.stagnation_periods:
            result['stagnation_periods'] = [
                {'start': start, 'end': end, 'length': end - start + 1}
                for start, end in stats.stagnation_periods
            ]
        
        # Add trend changes if found
        if stats.trend_changes:
            result['trend_changes'] = stats.trend_changes
        
        return result
