"""
Simple analysis tools for GCR Solver Manager.

Basic norm comparison and performance analysis without complex dependencies.
"""

import json
import logging
import math
from datetime import datetime, timedelta
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple

from ..database.simple_db import SimpleDatabase
from ..logs.manager import LogManager

# Module-level logger; callers configure handlers/levels at the application layer.
logger = logging.getLogger(__name__)


class SimpleAnalyzer:
    """Simple analysis tools for GCR solvers.

    Thin analysis layer over the run records stored by ``SimpleDatabase``:
    norm comparison, convergence analysis, performance reporting,
    CPU/GPU consistency validation, and JSON/CSV export.
    """

    def __init__(self, db: Optional[SimpleDatabase] = None, log_manager: Optional[LogManager] = None):
        """Initialize analyzer.

        Args:
            db: Database backend; a default ``SimpleDatabase`` is created when omitted.
            log_manager: Log manager; a default ``LogManager`` is created when omitted.
        """
        self.db = db or SimpleDatabase()
        self.log_manager = log_manager or LogManager()

    @staticmethod
    def _parse_timestamp(value: str) -> datetime:
        """Parse an ISO-format timestamp string, tolerating a trailing ``'Z'``.

        Returns a naive datetime so results can be compared against
        ``datetime.now()``; any explicit UTC offset is dropped.
        NOTE(review): assumes stored ``created_at`` values are effectively
        naive/local — confirm against SimpleDatabase's writer.
        """
        dt = datetime.fromisoformat(value.replace('Z', '+00:00'))
        return dt.replace(tzinfo=None) if dt.tzinfo is not None else dt

    def compare_norms(self, run_id1: int, run_id2: int) -> Optional[Dict[str, Any]]:
        """Compare norms between two test runs (delegates to the database)."""
        return self.db.compare_norms(run_id1, run_id2)

    def analyze_convergence(self, run_id: int) -> Optional[Dict[str, Any]]:
        """Analyze convergence data from a test run.

        Args:
            run_id: Database id of the run to analyze.

        Returns:
            Analysis dict, or None when the run is missing, is not a test
            run, or recorded no norms.
        """
        run = self.db.get_by_id(run_id)
        if not run or run['type'] != 'test':
            return None

        results = run.get('results', {})
        norms = results.get('norms', {})
        if not norms:
            return None

        analysis: Dict[str, Any] = {
            'run_id': run_id,
            'solver': run['solver'],
            'gpu': run['gpu'],
            'total_norms': len(norms),
            'norms': norms,
            'converged': results.get('converged', False),
            'final_residual': results.get('final_residual'),
            'analysis_date': datetime.now().isoformat()
        }

        # Convergence rate between first and last recorded norm (relies on
        # dict insertion order matching recording order).
        norm_values = list(norms.values())
        if len(norm_values) > 1:
            initial = norm_values[0]
            final = norm_values[-1]
            if initial > 0 and final > 0:
                analysis['convergence_rate'] = (initial - final) / initial
                # BUGFIX: previous code called .bit_length() on a float,
                # which always raises AttributeError. Orders of magnitude
                # gained is log10 of the reduction factor.
                analysis['orders_of_magnitude'] = abs(round(math.log10(initial / final)))

        return analysis

    @staticmethod
    def _categorize_residual(final_residual: float) -> str:
        """Bucket a final residual into a convergence quality category."""
        if final_residual < 1e-12:
            return 'high'
        if final_residual < 1e-8:
            return 'good'
        if final_residual < 1e-4:
            return 'moderate'
        return 'poor'

    def generate_performance_report(self, solver: Optional[str] = None, gpu: Optional[str] = None,
                                    days: int = 7) -> Dict[str, Any]:
        """Generate performance report for recent test runs.

        Args:
            solver: Optional solver filter.
            gpu: Optional GPU filter.
            days: Look-back window in days.

        Returns:
            Report dict with per-run entries and summary statistics, or a
            ``{'message': ...}`` dict when no recent runs match.
        """
        cutoff = datetime.now() - timedelta(days=days)

        # Fetch a generous window of recent test runs, then filter by date.
        runs = self.db.get_latest(100, 'test', solver, gpu)
        recent_runs = [r for r in runs if self._parse_timestamp(r['created_at']) > cutoff]

        if not recent_runs:
            return {'message': 'No recent test runs found'}

        report: Dict[str, Any] = {
            'period': f"{days} days",
            'total_runs': len(recent_runs),
            'solver': solver or 'all',
            'gpu': gpu or 'all',
            'runs': [],
            'summary': {
                'success_rate': 0,
                'average_convergence': 0,
                'convergence_distribution': {}
            }
        }

        successful_runs = 0
        final_residuals: List[float] = []  # final residual of each run that recorded norms

        for run in recent_runs:
            results = run.get('results', {})
            norms = results.get('norms', {})

            run_analysis: Dict[str, Any] = {
                'run_id': run['id'],
                'solver': run['solver'],
                'gpu': run['gpu'],
                'status': run['status'],
                'timestamp': run['created_at'],
                'norms': norms,
                'converged': results.get('converged', False)
            }

            if run['status'] == 'success':
                successful_runs += 1

            if norms:
                # Last recorded norm is treated as the run's final residual.
                final_residual = list(norms.values())[-1]
                # BUGFIX: compare against None so a perfect 0.0 residual is
                # still categorized (truthiness used to skip it).
                if final_residual is not None:
                    final_residuals.append(final_residual)
                    category = self._categorize_residual(final_residual)
                    run_analysis['convergence_category'] = category
                    distribution = report['summary']['convergence_distribution']
                    distribution[category] = distribution.get(category, 0) + 1

            report['runs'].append(run_analysis)

        report['summary']['success_rate'] = successful_runs / len(recent_runs)
        if final_residuals:
            # BUGFIX: this field was initialized but never computed; it is
            # now the mean final residual across runs that recorded norms.
            report['summary']['average_convergence'] = sum(final_residuals) / len(final_residuals)

        return report

    @staticmethod
    def _collect_norms(runs: List[Dict[str, Any]]) -> Dict[str, List[float]]:
        """Group recorded norm values by norm name across a set of runs."""
        grouped: Dict[str, List[float]] = {}
        for run in runs:
            for norm_name, value in run.get('results', {}).get('norms', {}).items():
                grouped.setdefault(norm_name, []).append(value)
        return grouped

    def compare_solver_performance(self, solver1: str, solver2: str, gpu: Optional[str] = None,
                                   days: int = 7) -> Dict[str, Any]:
        """Compare performance between two solvers over recent test runs.

        Args:
            solver1: First solver name.
            solver2: Second solver name.
            gpu: Optional GPU filter.
            days: Look-back window in days.

        Returns:
            Comparison dict (success rates and per-norm averages), or an
            ``{'error': ...}`` dict when either solver has no recent runs.
        """
        cutoff = datetime.now() - timedelta(days=days)

        all_runs = self.db.get_latest(200, 'test', None, gpu)
        recent_runs = [r for r in all_runs if self._parse_timestamp(r['created_at']) > cutoff]

        solver1_runs = [r for r in recent_runs if r['solver'] == solver1]
        solver2_runs = [r for r in recent_runs if r['solver'] == solver2]

        if not solver1_runs or not solver2_runs:
            return {'error': f'Insufficient data for comparison between {solver1} and {solver2}'}

        comparison: Dict[str, Any] = {
            'solver1': solver1,
            'solver2': solver2,
            'gpu': gpu or 'all',
            'period': f"{days} days",
            'solver1_runs': len(solver1_runs),
            'solver2_runs': len(solver2_runs),
            'solver1_success_rate': 0,
            'solver2_success_rate': 0,
            'norm_comparison': {}
        }

        # Both run lists are non-empty here, so the divisions are safe.
        solver1_success = sum(1 for r in solver1_runs if r['status'] == 'success')
        solver2_success = sum(1 for r in solver2_runs if r['status'] == 'success')
        comparison['solver1_success_rate'] = solver1_success / len(solver1_runs)
        comparison['solver2_success_rate'] = solver2_success / len(solver2_runs)

        # Compare average values for norm types both solvers recorded.
        solver1_norms = self._collect_norms(solver1_runs)
        solver2_norms = self._collect_norms(solver2_runs)

        for norm_name in set(solver1_norms) & set(solver2_norms):
            values1 = solver1_norms[norm_name]
            values2 = solver2_norms[norm_name]
            if values1 and values2:
                avg1 = sum(values1) / len(values1)
                avg2 = sum(values2) / len(values2)
                comparison['norm_comparison'][norm_name] = {
                    f'{solver1}_avg': avg1,
                    f'{solver2}_avg': avg2,
                    'ratio': avg1 / avg2 if avg2 > 0 else None,
                    # Lower average residual wins; ties go to solver2.
                    'better_solver': solver1 if avg1 < avg2 else solver2
                }

        return comparison

    def export_analysis(self, output_file: str, solver: Optional[str] = None,
                        gpu: Optional[str] = None, days: int = 30) -> bool:
        """Export a performance report to a JSON file.

        Args:
            output_file: Destination file path.
            solver: Optional solver filter.
            gpu: Optional GPU filter.
            days: Look-back window in days.

        Returns:
            True on success, False on any failure (logged).
        """
        try:
            report = self.generate_performance_report(solver, gpu, days)

            with open(output_file, 'w') as f:
                # default=str keeps non-JSON-native values (e.g. datetimes)
                # exportable by stringifying them.
                json.dump(report, f, indent=2, default=str)

            logger.info(f"Analysis exported to {output_file}")
            return True

        except Exception as e:
            logger.error(f"Failed to export analysis: {e}")
            return False

    def get_trend_analysis(self, metric: str = 'final_residual', days: int = 30) -> Dict[str, Any]:
        """Analyze daily trends for a specific metric.

        Args:
            metric: Metric label recorded in the output. Only the final
                residual (last recorded norm) is actually computed.
            days: Look-back window in days.

        Returns:
            Trends dict with per-day statistics, or a ``{'message': ...}``
            dict when no recent runs exist.
        """
        cutoff = datetime.now() - timedelta(days=days)

        runs = self.db.get_latest(1000, 'test')
        recent_runs = [r for r in runs if self._parse_timestamp(r['created_at']) > cutoff]

        if not recent_runs:
            return {'message': 'No recent test runs found'}

        trends: Dict[str, Any] = {
            'metric': metric,
            'period': f"{days} days",
            'total_runs': len(recent_runs),
            # NOTE(review): these two aggregates are not populated yet; kept
            # empty for output-shape compatibility.
            'trends_by_solver': {},
            'trends_by_gpu': {},
            'daily_stats': []
        }

        # Group runs by calendar day (created_at prefix YYYY-MM-DD).
        daily_data: Dict[str, List[Dict[str, Any]]] = {}
        for run in recent_runs:
            daily_data.setdefault(run['created_at'][:10], []).append(run)

        for date_key, day_runs in sorted(daily_data.items()):
            day_stats: Dict[str, Any] = {
                'date': date_key,
                'total_runs': len(day_runs),
                'successful_runs': sum(1 for r in day_runs if r['status'] == 'success'),
                'metrics': {}
            }

            # Collect final residuals keyed both by solver name and GPU name.
            for run in day_runs:
                norms = run.get('results', {}).get('norms', {})
                if norms:
                    final_residual = list(norms.values())[-1]
                    metrics = day_stats['metrics']
                    metrics.setdefault(run['solver'], []).append(final_residual)
                    metrics.setdefault(run['gpu'] or 'cpu', []).append(final_residual)

            # Replace the per-key value lists with their daily averages.
            for key, values in day_stats['metrics'].items():
                if values:
                    day_stats['metrics'][key] = sum(values) / len(values)

            trends['daily_stats'].append(day_stats)

        return trends

    def validate_consistency(self, cpu_gpu_pairs: List[Tuple[str, str]]) -> Dict[str, Any]:
        """Validate consistency between CPU and GPU implementations.

        For each (solver, gpu) pair, compares the final residual of the most
        recent CPU run against that of the most recent GPU run using a
        relative tolerance.

        Args:
            cpu_gpu_pairs: List of (solver_name, gpu_name) pairs to check.

        Returns:
            Summary dict with per-pair details and consistent/inconsistent
            counts. Pairs with no data are skipped silently.
        """
        validation: Dict[str, Any] = {
            'pairs_analyzed': len(cpu_gpu_pairs),
            'consistent_pairs': 0,
            'inconsistent_pairs': 0,
            'details': []
        }

        for solver, gpu in cpu_gpu_pairs:
            cpu_runs = self.db.get_latest(10, 'test', solver, 'cpu')
            gpu_runs = self.db.get_latest(10, 'test', solver, gpu)

            if not cpu_runs or not gpu_runs:
                continue  # no data on one side; nothing to compare

            cpu_final = cpu_runs[0].get('results', {}).get('final_residual')
            gpu_final = gpu_runs[0].get('results', {}).get('final_residual')

            # BUGFIX: compare against None so exact-zero residuals are not
            # skipped by truthiness.
            if cpu_final is not None and gpu_final is not None:
                tolerance = 1e-10
                scale = max(abs(cpu_final), abs(gpu_final))
                # BUGFIX: guard the 0/0 case (two exact-zero residuals used
                # to raise ZeroDivisionError); identical zeros are trivially
                # consistent.
                relative_error = abs(cpu_final - gpu_final) / scale if scale > 0 else 0.0
                consistent = abs(cpu_final - gpu_final) <= tolerance * scale

                validation['details'].append({
                    'solver': solver,
                    'gpu': gpu,
                    'cpu_final': cpu_final,
                    'gpu_final': gpu_final,
                    'consistent': consistent,
                    'relative_error': relative_error
                })

                if consistent:
                    validation['consistent_pairs'] += 1
                else:
                    validation['inconsistent_pairs'] += 1

        return validation

    def export_runs(self, output_path: str, format: str = 'json',
                    run_type: Optional[str] = None, solver: Optional[str] = None,
                    days: int = 30) -> bool:
        """
        Export run data to JSON or CSV format.

        Args:
            output_path: Path to save exported data
            format: Export format ('json' or 'csv')
            run_type: Filter by run type ('build' or 'test')
            solver: Filter by solver type
            days: Export runs from last N days

        Returns:
            True if export successful
        """
        try:
            cutoff_date = datetime.now() - timedelta(days=days)

            # Fetch a generous window, then filter client-side by date.
            all_runs = self.db.get_latest(1000, run_type, solver)

            filtered_runs = []
            for run in all_runs:
                try:
                    if self._parse_timestamp(run['created_at']) >= cutoff_date:
                        filtered_runs.append(run)
                except (KeyError, TypeError, ValueError):
                    # BUGFIX: narrowed from a bare except. Runs whose dates
                    # cannot be parsed are still included (better to include
                    # than to silently drop them).
                    filtered_runs.append(run)

            if not filtered_runs:
                logger.warning("No runs found matching the criteria")
                return False

            path = Path(output_path)
            path.parent.mkdir(parents=True, exist_ok=True)

            if format.lower() == 'json':
                return self._export_json(filtered_runs, path)
            if format.lower() == 'csv':
                return self._export_csv(filtered_runs, path)

            logger.error(f"Unsupported export format: {format}")
            return False

        except Exception as e:
            logger.error(f"Export failed: {e}")
            return False

    def _export_json(self, runs: List[Dict[str, Any]], output_path: Path) -> bool:
        """Export runs to JSON format.

        Wraps the run records with export metadata (timestamp, count,
        format version). Returns True on success.
        """
        try:
            export_data = {
                'export_metadata': {
                    'timestamp': datetime.now().isoformat(),
                    'total_runs': len(runs),
                    'version': '1.0'
                },
                'runs': runs
            }

            with open(output_path, 'w') as f:
                json.dump(export_data, f, indent=2, default=str)

            logger.info(f"Exported {len(runs)} runs to JSON: {output_path}")
            return True

        except Exception as e:
            logger.error(f"JSON export failed: {e}")
            return False

    def _export_csv(self, runs: List[Dict[str, Any]], output_path: Path) -> bool:
        """Export runs to CSV format.

        Fixed columns come first, followed by dynamically discovered
        ``config_*`` and ``results_*`` columns (sorted by key). Nested
        result values (e.g. norms dicts) are serialized as JSON strings.
        Returns True on success, False on failure or empty input.
        """
        try:
            import csv  # local import: only needed for CSV export

            if not runs:
                return False

            # Fixed columns expected on every run record.
            columns = ['id', 'type', 'solver', 'gpu', 'status', 'created_at', 'log_path']

            # Discover the union of config/results keys across all runs so
            # every row shares the same header.
            all_config_keys: set = set()
            all_results_keys: set = set()
            for run in runs:
                config = run.get('config')
                if isinstance(config, dict):
                    all_config_keys.update(config.keys())
                results = run.get('results')
                if isinstance(results, dict):
                    all_results_keys.update(results.keys())

            config_columns = [f'config_{key}' for key in sorted(all_config_keys)]
            results_columns = [f'results_{key}' for key in sorted(all_results_keys)]
            all_columns = columns + config_columns + results_columns

            with open(output_path, 'w', newline='') as f:
                writer = csv.DictWriter(f, fieldnames=all_columns)
                writer.writeheader()

                for run in runs:
                    row = {col: run.get(col, '') for col in columns}

                    config = run.get('config', {})
                    if isinstance(config, dict):
                        for key in all_config_keys:
                            row[f'config_{key}'] = config.get(key, '')

                    results = run.get('results', {})
                    if isinstance(results, dict):
                        for key in all_results_keys:
                            value = results.get(key, '')
                            # Flatten nested data (like norms) to a JSON string.
                            if isinstance(value, dict):
                                value = json.dumps(value)
                            row[f'results_{key}'] = value

                    writer.writerow(row)

            logger.info(f"Exported {len(runs)} runs to CSV: {output_path}")
            return True

        except Exception as e:
            logger.error(f"CSV export failed: {e}")
            return False