#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Intermediate Value Models - Comparison Report Generator

Reads all JSON results from summary_results/ and generates:
1. Comprehensive comparison tables
2. Visualization plots
3. Statistical analysis
4. Ranking and recommendations
"""

import json
import os
import sys
from pathlib import Path
from typing import Dict, List, Tuple
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict

# Algorithm configuration.
# Maps the algorithm key that prefixes each result filename
# (e.g. 'aes' in 'aes_hw.json') to a human-readable display name
# used in tables, charts, and the HTML report.
ALGORITHM_NAMES = {
    'aes': 'AES-128',
    'sm4': 'SM4',
    'rsa': 'RSA-2048',
    'ecc': 'ECC-P256',
    'sm2': 'SM2',
    'dilithium': 'Dilithium-2'
}


class ComparisonReportGenerator:
    """Generate comparison reports from summary results"""
    
    def __init__(self, summary_dir: str = None):
        """
        Initialize the report generator
        
        Args:
            summary_dir: Directory containing JSON result files
        """
        if summary_dir is None:
            base_dir = Path(__file__).parent
            summary_dir = base_dir / 'summary_results'
        
        self.summary_dir = Path(summary_dir)
        self.results = {}
        self.load_all_results()
    
    def load_all_results(self):
        """Load all JSON result files"""
        if not self.summary_dir.exists():
            print(f"Warning: Summary directory not found: {self.summary_dir}")
            print("Please run algorithm tests first.")
            return
        
        json_files = list(self.summary_dir.glob('*.json'))
        
        if not json_files:
            print(f"Warning: No result files found in {self.summary_dir}")
            return
        
        print(f"Loading {len(json_files)} result files...")
        
        for json_file in json_files:
            try:
                with open(json_file, 'r') as f:
                    data = json.load(f)
                
                # Extract algorithm and model from filename
                # Format: algorithm_model.json or algorithm_model_timestamp.json
                filename = json_file.stem
                parts = filename.split('_')
                
                if len(parts) >= 2:
                    algorithm = parts[0]
                    # Join remaining parts as model name (may contain underscores)
                    # Remove timestamp if present (last part is digits)
                    if parts[-1].isdigit() and len(parts) > 2:
                        model = '_'.join(parts[1:-1])
                    else:
                        model = '_'.join(parts[1:])
                    
                    if algorithm not in self.results:
                        self.results[algorithm] = {}
                    
                    self.results[algorithm][model] = {
                        'file': json_file.name,
                        'data': data
                    }
                    
            except Exception as e:
                print(f"Error loading {json_file}: {e}")
        
        print(f"Loaded results for {len(self.results)} algorithms")
        for algo, models in self.results.items():
            print(f"  {algo}: {len(models)} models")
    
    def extract_metrics(self, data: Dict) -> Dict:
        """Pull the key correlation/CPU metrics out of one result payload.

        Missing fields fall back to neutral defaults (zero correlation,
        p-value of 1.0, 'unknown' significance).
        """
        cpu_stats = data.get('cpu_statistics', {})
        intermediate_stats = data.get('intermediate_statistics', {})
        return {
            'pearson_coef': data.get('pearson_coefficient', 0.0),
            'pearson_p': data.get('pearson_p_value', 1.0),
            'spearman_coef': data.get('spearman_coefficient', 0.0),
            'spearman_p': data.get('spearman_p_value', 1.0),
            'significance': data.get('significance', 'unknown'),
            'description': data.get('description', 'Unknown'),
            'cpu_mean': cpu_stats.get('mean', 0.0),
            'cpu_std': cpu_stats.get('std', 0.0),
            'unique_values': intermediate_stats.get('unique_count', 0),
        }
    
    def generate_summary_table(self) -> str:
        """Build the plain-text correlation summary, one section per algorithm.

        Within each section, models are listed by |Pearson| descending.
        """
        bar = "=" * 100
        out = [
            bar,
            "INTERMEDIATE VALUE MODELS - CORRELATION ANALYSIS SUMMARY",
            bar,
            "",
        ]

        for algorithm in sorted(self.results):
            display_name = ALGORITHM_NAMES.get(algorithm, algorithm.upper())
            out.append(f"\n{bar}")
            out.append(display_name)
            out.append(bar)
            out.append(f"{'Model':<30} {'|Pearson|':<12} {'P-value':<12} {'Significance':<15} {'CPU Mean':<10}")
            out.append("-" * 100)

            # Rank models by absolute Pearson coefficient, strongest first.
            ranked = sorted(
                ((model, self.extract_metrics(result['data']))
                 for model, result in self.results[algorithm].items()),
                key=lambda item: abs(item[1]['pearson_coef']),
                reverse=True,
            )

            for model, metrics in ranked:
                out.append(
                    f"{model:<30} "
                    f"{abs(metrics['pearson_coef']):<12.4f} "
                    f"{metrics['pearson_p']:<12.6f} "
                    f"{metrics['significance']:<15} "
                    f"{metrics['cpu_mean']:<10.2f}%"
                )

            out.append("")

        out.append(bar)
        out.append(f"Report generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
        out.append(bar)

        return '\n'.join(out)
    
    def generate_ranking(self) -> List[Tuple]:
        """Rank every (algorithm, model) pair by |Pearson| descending.

        Returns a list of dicts with correlation, significance, and CPU
        fields flattened for tabular/chart consumption.
        """
        def as_entry(algorithm, model, result):
            # Flatten the metrics of one model into a ranking record.
            metrics = self.extract_metrics(result['data'])
            return {
                'algorithm': algorithm,
                'model': model,
                'full_name': f"{algorithm}_{model}",
                'abs_pearson': abs(metrics['pearson_coef']),
                'pearson': metrics['pearson_coef'],
                'p_value': metrics['pearson_p'],
                'significance': metrics['significance'],
                'cpu_mean': metrics['cpu_mean'],
            }

        entries = [
            as_entry(algorithm, model, result)
            for algorithm, models in self.results.items()
            for model, result in models.items()
        ]
        entries.sort(key=lambda entry: entry['abs_pearson'], reverse=True)
        return entries
    
    def plot_comparison_charts(self, output_dir: Path):
        """Render the ranking bar chart and per-algorithm boxplot as PNGs.

        Writes 'overall_ranking.png' and 'algorithm_comparison.png' into
        output_dir (created if missing).

        Args:
            output_dir: Destination directory for the PNG files.
        """
        output_dir.mkdir(parents=True, exist_ok=True)

        # 1. Overall ranking bar chart (top 20 models by |Pearson|)
        ranking = self.generate_ranking()
        top_n = min(20, len(ranking))
        top_models = ranking[:top_n]

        fig, ax = plt.subplots(figsize=(14, 8))

        names = [r['full_name'] for r in top_models]
        pearson_values = [r['abs_pearson'] for r in top_models]

        # Bar color encodes the significance tier of each correlation.
        colors = []
        for r in top_models:
            if r['p_value'] < 0.01:
                colors.append('green')
            elif r['p_value'] < 0.05:
                colors.append('orange')
            else:
                colors.append('red')

        ax.barh(range(len(names)), pearson_values, color=colors, alpha=0.7)
        ax.set_yticks(range(len(names)))
        ax.set_yticklabels(names)
        ax.set_xlabel('|Pearson Correlation Coefficient|')
        ax.set_title(f'Top {top_n} Models by Correlation Strength')
        ax.grid(True, alpha=0.3, axis='x')
        ax.invert_yaxis()

        # Annotate each bar with its value. (Previously zipped over the
        # unused BarContainer; enumerating the values is sufficient.)
        for i, val in enumerate(pearson_values):
            ax.text(val + 0.01, i, f'{val:.3f}', va='center')

        # Legend explaining the significance color coding
        from matplotlib.patches import Patch
        legend_elements = [
            Patch(facecolor='green', alpha=0.7, label='p < 0.01 (Highly Significant)'),
            Patch(facecolor='orange', alpha=0.7, label='p < 0.05 (Significant)'),
            Patch(facecolor='red', alpha=0.7, label='p >= 0.05 (Not Significant)')
        ]
        ax.legend(handles=legend_elements, loc='lower right')

        plt.tight_layout()
        plt.savefig(output_dir / 'overall_ranking.png', dpi=150, bbox_inches='tight')
        plt.close()

        print(f"[+] Overall ranking chart saved: {output_dir / 'overall_ranking.png'}")

        # 2. Per-algorithm distribution of |Pearson| as a boxplot
        fig, ax = plt.subplots(figsize=(12, 6))

        algo_data = defaultdict(list)
        for r in ranking:
            algo_data[r['algorithm']].append(r['abs_pearson'])

        algorithms = sorted(algo_data.keys())
        positions = range(len(algorithms))

        box_data = [algo_data[algo] for algo in algorithms]
        # NOTE(review): 'labels=' was renamed to 'tick_labels=' in
        # matplotlib 3.9; kept as-is for compatibility with older versions.
        bp = ax.boxplot(box_data, positions=positions, labels=algorithms, patch_artist=True)

        for patch in bp['boxes']:
            patch.set_facecolor('lightblue')
            patch.set_alpha(0.7)

        ax.set_ylabel('|Pearson Correlation Coefficient|')
        ax.set_title('Correlation Distribution by Algorithm')
        ax.grid(True, alpha=0.3, axis='y')

        plt.tight_layout()
        plt.savefig(output_dir / 'algorithm_comparison.png', dpi=150, bbox_inches='tight')
        plt.close()

        print(f"[+] Algorithm comparison chart saved: {output_dir / 'algorithm_comparison.png'}")
    
    def generate_html_report(self, output_path: Path):
        """Generate interactive HTML report.

        Writes a standalone HTML page (styled tables plus references to the
        PNG charts) to output_path. The chart <img> tags use relative paths,
        so the images are expected to sit in the same directory as the report.

        Args:
            output_path: File path the HTML document is written to.
        """
        ranking = self.generate_ranking()
        
        # Page header and CSS. Doubled braces ({{ }}) escape literal braces
        # inside the f-string.
        html = f"""<!DOCTYPE html>
<html>
<head>
    <title>Intermediate Value Models - Comparison Report</title>
    <style>
        body {{
            font-family: Arial, sans-serif;
            margin: 20px;
            background-color: #f5f5f5;
        }}
        .container {{
            max-width: 1400px;
            margin: 0 auto;
            background-color: white;
            padding: 30px;
            box-shadow: 0 0 10px rgba(0,0,0,0.1);
        }}
        h1 {{
            color: #333;
            border-bottom: 3px solid #4CAF50;
            padding-bottom: 10px;
        }}
        h2 {{
            color: #555;
            margin-top: 30px;
        }}
        table {{
            border-collapse: collapse;
            width: 100%;
            margin: 20px 0;
        }}
        th, td {{
            border: 1px solid #ddd;
            padding: 12px;
            text-align: left;
        }}
        th {{
            background-color: #4CAF50;
            color: white;
        }}
        tr:nth-child(even) {{
            background-color: #f9f9f9;
        }}
        .sig-high {{ background-color: #d4edda; }}
        .sig-medium {{ background-color: #fff3cd; }}
        .sig-low {{ background-color: #f8d7da; }}
        .metric {{ font-weight: bold; }}
        .timestamp {{
            color: #888;
            font-size: 0.9em;
        }}
        img {{
            max-width: 100%;
            margin: 20px 0;
            border: 1px solid #ddd;
        }}
    </style>
</head>
<body>
    <div class="container">
        <h1>Intermediate Value Models - Correlation Analysis Report</h1>
        <p class="timestamp">Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>
        
        <h2>Overall Ranking (Top 20)</h2>
        <table>
            <tr>
                <th>Rank</th>
                <th>Algorithm</th>
                <th>Model</th>
                <th>|Pearson|</th>
                <th>P-value</th>
                <th>Significance</th>
                <th>CPU Mean</th>
            </tr>
"""
        
        # Top-20 overview rows; the row CSS class encodes the significance
        # tier derived from the Pearson p-value.
        # NOTE(review): model/algorithm strings are interpolated without HTML
        # escaping. They come from local result filenames, but confirm that
        # is acceptable before feeding untrusted names through this path.
        for i, r in enumerate(ranking[:20], 1):
            sig_class = 'sig-high' if r['p_value'] < 0.01 else 'sig-medium' if r['p_value'] < 0.05 else 'sig-low'
            html += f"""
            <tr class="{sig_class}">
                <td>{i}</td>
                <td>{ALGORITHM_NAMES.get(r['algorithm'], r['algorithm'])}</td>
                <td>{r['model']}</td>
                <td class="metric">{r['abs_pearson']:.4f}</td>
                <td>{r['p_value']:.6f}</td>
                <td>{r['significance']}</td>
                <td>{r['cpu_mean']:.2f}%</td>
            </tr>
"""
        
        # Embedded chart images (written by plot_comparison_charts into the
        # same output directory, hence the relative src paths).
        html += """
        </table>
        
        <h2>Visualization</h2>
        <img src="overall_ranking.png" alt="Overall Ranking">
        <img src="algorithm_comparison.png" alt="Algorithm Comparison">
        
        <h2>By Algorithm</h2>
"""
        
        # One detail table per algorithm, models sorted by |Pearson| descending.
        for algorithm in sorted(self.results.keys()):
            algo_name = ALGORITHM_NAMES.get(algorithm, algorithm.upper())
            html += f"""
        <h3>{algo_name}</h3>
        <table>
            <tr>
                <th>Model</th>
                <th>|Pearson|</th>
                <th>P-value</th>
                <th>Significance</th>
                <th>Spearman</th>
                <th>CPU Mean</th>
                <th>Unique Values</th>
            </tr>
"""
            
            models = self.results[algorithm]
            model_list = []
            for model, result in models.items():
                metrics = self.extract_metrics(result['data'])
                model_list.append((model, metrics))
            
            model_list.sort(key=lambda x: abs(x[1]['pearson_coef']), reverse=True)
            
            for model, m in model_list:
                sig_class = 'sig-high' if m['pearson_p'] < 0.01 else 'sig-medium' if m['pearson_p'] < 0.05 else 'sig-low'
                html += f"""
            <tr class="{sig_class}">
                <td>{model}</td>
                <td class="metric">{abs(m['pearson_coef']):.4f}</td>
                <td>{m['pearson_p']:.6f}</td>
                <td>{m['significance']}</td>
                <td>{m['spearman_coef']:.4f}</td>
                <td>{m['cpu_mean']:.2f}%</td>
                <td>{m['unique_values']}</td>
            </tr>
"""
            
            html += """
        </table>
"""
        
        # Static legend and document footer.
        html += """
        <h2>Legend</h2>
        <ul>
            <li><strong>|Pearson|:</strong> Absolute value of Pearson correlation coefficient (0-1)</li>
            <li><strong>P-value:</strong> Statistical significance (< 0.05 is significant)</li>
            <li><strong>Significance:</strong> Correlation strength category</li>
            <li><strong>CPU Mean:</strong> Average CPU usage during trace collection</li>
            <li class="sig-high">Green background: Highly significant (p < 0.01)</li>
            <li class="sig-medium">Yellow background: Significant (p < 0.05)</li>
            <li class="sig-low">Red background: Not significant (p >= 0.05)</li>
        </ul>
    </div>
</body>
</html>
"""
        
        with open(output_path, 'w', encoding='utf-8') as f:
            f.write(html)
        
        print(f"[+] HTML report saved: {output_path}")
    
    def generate_full_report(self, output_dir: str = None):
        """Generate all report formats"""
        if output_dir is None:
            output_dir = self.summary_dir.parent / 'comparison_reports'
        
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)
        
        print("\n" + "="*70)
        print("Generating Comparison Report")
        print("="*70)
        
        if not self.results:
            print("[-] No results available. Please run algorithm tests first.")
            return
        
        # 1. Text summary
        summary_text = self.generate_summary_table()
        print(summary_text)
        
        text_path = output_dir / 'summary.txt'
        with open(text_path, 'w', encoding='utf-8') as f:
            f.write(summary_text)
        print(f"\n[+] Text summary saved: {text_path}")
        
        # 2. Visualization charts
        self.plot_comparison_charts(output_dir)
        
        # 3. HTML report
        html_path = output_dir / 'report.html'
        self.generate_html_report(html_path)
        
        # 4. JSON export
        json_path = output_dir / 'full_data.json'
        with open(json_path, 'w') as f:
            json.dump(self.results, f, indent=2)
        print(f"[+] JSON data exported: {json_path}")
        
        print("\n" + "="*70)
        print("Report Generation Complete")
        print("="*70)
        print(f"Output directory: {output_dir}")
        print(f"\nView HTML report:")
        print(f"  file://{html_path.absolute()}")
        print("="*70)


def main():
    """CLI entry point: parse options, build the generator, emit all reports."""
    import argparse

    parser = argparse.ArgumentParser(
        description='Generate comparison report from test results')
    parser.add_argument(
        '--summary-dir', '-s', type=str,
        help='Summary results directory (default: ./summary_results)')
    parser.add_argument(
        '--output-dir', '-o', type=str,
        help='Output directory for reports (default: ./comparison_reports)')
    opts = parser.parse_args()

    generator = ComparisonReportGenerator(summary_dir=opts.summary_dir)
    generator.generate_full_report(output_dir=opts.output_dir)


# Allow running this module directly as a script.
if __name__ == '__main__':
    main()
