#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Intermediate value模型与CPUUse率相关性Analyze器

AnalyzeIntermediate value模型CalculateResult与CPUUse率Trace的相关性
"""

import os
import sys
import json
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from typing import Dict, List, Tuple
from scipy import stats


class CorrelationAnalyzer:
    """Correlation analyzer.

    Loads a .npz trace file (CPU-usage traces plus the intermediate values
    computed by a model), computes Pearson/Spearman correlation between the
    per-trace mean CPU usage and the intermediate values, and produces
    plots and a JSON report.
    """

    def __init__(self, trace_file: str):
        """
        Initialize the analyzer and eagerly load the trace data.

        Args:
            trace_file: path to the trace data file (.npz)
        """
        self.trace_file = Path(trace_file)
        self.load_data()

    def load_data(self):
        """Load traces, intermediate values and optional sidecar metadata.

        Raises:
            FileNotFoundError: if the .npz file does not exist.
        """
        if not self.trace_file.exists():
            raise FileNotFoundError(f"Trace file not found: {self.trace_file}")

        data = np.load(self.trace_file, allow_pickle=True)
        self.traces = data['traces']
        self.intermediate_values = data['intermediate_values']

        # Optional metadata lives in a .json file next to the .npz.
        json_file = self.trace_file.with_suffix('.json')
        if json_file.exists():
            with open(json_file, 'r') as f:
                self.metadata = json.load(f)
        else:
            self.metadata = {}

        print(f"Data loaded: {len(self.traces)} traces")

    def _paired_data(self) -> Tuple[np.ndarray, np.ndarray]:
        """Return index-aligned (mean CPU usage, intermediate value) arrays.

        A pair is kept only when the trace is a non-empty ndarray AND its
        intermediate value is not None.  Filtering jointly keeps the two
        arrays aligned; filtering each array separately and then truncating
        to the shorter length can silently pair trace i with intermediate
        value j != i when entries are dropped from the middle.
        """
        cpu_means: List[float] = []
        inter_vals: List[float] = []
        for trace, value in zip(self.traces, self.intermediate_values):
            if isinstance(trace, np.ndarray) and len(trace) > 0 and value is not None:
                cpu_means.append(float(np.mean(trace)))
                inter_vals.append(float(value))
        return np.asarray(cpu_means), np.asarray(inter_vals)

    def compute_cpu_statistics(self) -> Dict:
        """Compute summary statistics of the per-trace mean CPU usage.

        Returns:
            Dict with 'mean', 'std', 'min', 'max', 'median' (floats; NaN
            when no valid trace exists) and the raw per-trace means under
            'trace_means'.
        """
        # Mean CPU usage of each valid (non-empty ndarray) trace.
        mean_cpus = np.array([np.mean(trace) for trace in self.traces
                              if isinstance(trace, np.ndarray) and len(trace) > 0])

        if mean_cpus.size == 0:
            # np.min/np.max raise ValueError on empty input; report NaNs.
            nan = float('nan')
            return {'mean': nan, 'std': nan, 'min': nan, 'max': nan,
                    'median': nan, 'trace_means': mean_cpus}

        return {
            'mean': float(np.mean(mean_cpus)),
            'std': float(np.std(mean_cpus)),
            'min': float(np.min(mean_cpus)),
            'max': float(np.max(mean_cpus)),
            'median': float(np.median(mean_cpus)),
            'trace_means': mean_cpus
        }

    def compute_intermediate_statistics(self) -> Dict:
        """Compute summary statistics of the non-None intermediate values.

        Returns:
            Dict with 'mean', 'std', 'min', 'max', 'median', 'unique_count'
            and the raw filtered values under 'values'.
        """
        values = np.array([v for v in self.intermediate_values if v is not None])

        if values.size == 0:
            # Same empty-input guard as compute_cpu_statistics.
            nan = float('nan')
            return {'mean': nan, 'std': nan, 'min': nan, 'max': nan,
                    'median': nan, 'unique_count': 0, 'values': values}

        return {
            'mean': float(np.mean(values)),
            'std': float(np.std(values)),
            'min': float(np.min(values)),
            'max': float(np.max(values)),
            'median': float(np.median(values)),
            'unique_count': len(np.unique(values)),
            'values': values
        }

    def compute_correlation(self) -> Dict:
        """Compute Pearson and Spearman correlation between the intermediate
        values and the per-trace mean CPU usage.

        Returns:
            Dict with both coefficients and p-values, a qualitative
            'significance' label, a human-readable 'description', plus the
            underlying CPU and intermediate-value statistics.
        """
        cpu_stats = self.compute_cpu_statistics()
        inter_stats = self.compute_intermediate_statistics()

        # Jointly-filtered pairs keep indices aligned (see _paired_data).
        cpu_means, inter_values = self._paired_data()
        n_pairs = len(cpu_means)

        if n_pairs < 2:
            print(f"WARNING: Insufficient data for correlation (only {n_pairs} valid pairs)")
            return {
                'pearson_coefficient': float('nan'),
                'pearson_p_value': float('nan'),
                'spearman_coefficient': float('nan'),
                'spearman_p_value': float('nan'),
                'significance': 'insufficient_data',
                'description': f'Insufficient data ({n_pairs} pairs)',
                'cpu_statistics': cpu_stats,
                'intermediate_statistics': inter_stats
            }

        # pearsonr/spearmanr are undefined for zero-variance input; bail out
        # explicitly instead of letting scipy emit NaNs with warnings.
        if np.std(cpu_means) == 0 or np.std(inter_values) == 0:
            print("WARNING: Constant input, correlation is undefined")
            return {
                'pearson_coefficient': float('nan'),
                'pearson_p_value': float('nan'),
                'spearman_coefficient': float('nan'),
                'spearman_p_value': float('nan'),
                'significance': 'undefined',
                'description': 'Undefined (constant input)',
                'cpu_statistics': cpu_stats,
                'intermediate_statistics': inter_stats
            }

        # Pearson (linear) and Spearman (rank) correlation coefficients.
        pearson_corr, pearson_p = stats.pearsonr(inter_values, cpu_means)
        spearman_corr, spearman_p = stats.spearmanr(inter_values, cpu_means)

        # Qualitative strength label derived from |Pearson r|.
        abs_corr = abs(pearson_corr)
        if abs_corr > 0.7:
            significance, description = "very_strong", "Very Strong Correlation"
        elif abs_corr > 0.5:
            significance, description = "strong", "Strong Correlation"
        elif abs_corr > 0.3:
            significance, description = "moderate", "Moderate Correlation"
        elif abs_corr > 0.1:
            significance, description = "weak", "Weak Correlation"
        else:
            significance, description = "very_weak", "Very Weak Correlation"

        return {
            'pearson_coefficient': float(pearson_corr),
            'pearson_p_value': float(pearson_p),
            'spearman_coefficient': float(spearman_corr),
            'spearman_p_value': float(spearman_p),
            'significance': significance,
            'description': description,
            'cpu_statistics': cpu_stats,
            'intermediate_statistics': inter_stats
        }

    def plot_scatter(self, output_path: str = None):
        """Render a scatter plot of intermediate value vs. mean CPU usage.

        Args:
            output_path: if given, save the figure there; otherwise show it.
        """
        correlation = self.compute_correlation()
        # Use the aligned pairs, not the independently-filtered stat arrays.
        cpu_means, inter_values = self._paired_data()
        min_len = len(cpu_means)

        if min_len < 2:
            print(f"Skipping scatter plot (insufficient data: {min_len} points)")
            return

        plt.figure(figsize=(10, 6))
        plt.scatter(inter_values, cpu_means, alpha=0.5, s=20)
        plt.xlabel('Intermediate Value')
        plt.ylabel('Mean CPU Usage (%)')
        plt.title(f"Correlation: r={correlation['pearson_coefficient']:.4f}\n"
                 f"{correlation['description']}")
        plt.grid(True, alpha=0.3)

        # Overlay a least-squares trend line (needs non-constant x).
        if len(inter_values) > 1 and np.std(inter_values) > 0:
            z = np.polyfit(inter_values, cpu_means, 1)
            p = np.poly1d(z)
            x_line = np.linspace(inter_values.min(), inter_values.max(), 100)
            plt.plot(x_line, p(x_line), "r--", alpha=0.8, linewidth=2)

        plt.tight_layout()

        if output_path:
            plt.savefig(output_path, dpi=150, bbox_inches='tight')
            print(f"Scatter plot saved: {output_path}")
        else:
            plt.show()

        plt.close()

    def plot_histogram(self, output_path: str = None):
        """Render side-by-side histograms of both distributions.

        Args:
            output_path: if given, save the figure there; otherwise show it.
        """
        correlation = self.compute_correlation()
        inter_values = correlation['intermediate_statistics']['values']

        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))

        # Distribution of intermediate values.
        ax1.hist(inter_values, bins=50, alpha=0.7, edgecolor='black')
        ax1.set_xlabel('Intermediate Value')
        ax1.set_ylabel('Frequency')
        ax1.set_title('Distribution of Intermediate Values')
        ax1.grid(True, alpha=0.3)

        # Distribution of per-trace mean CPU usage.
        cpu_means = correlation['cpu_statistics']['trace_means']
        ax2.hist(cpu_means, bins=50, alpha=0.7, edgecolor='black', color='orange')
        ax2.set_xlabel('Mean CPU Usage (%)')
        ax2.set_ylabel('Frequency')
        ax2.set_title('Distribution of CPU Usage')
        ax2.grid(True, alpha=0.3)

        plt.tight_layout()

        if output_path:
            plt.savefig(output_path, dpi=150, bbox_inches='tight')
            print(f"Histogram saved: {output_path}")
        else:
            plt.show()

        plt.close()

    def generate_report(self, output_dir: str = None):
        """Write the JSON report and plots, print a summary, and return the
        correlation result dict.

        Args:
            output_dir: directory for the report files (default: cwd).
        """
        if output_dir:
            output_dir = Path(output_dir)
            output_dir.mkdir(parents=True, exist_ok=True)
        else:
            output_dir = Path('.')

        correlation = self.compute_correlation()

        # Serialize without the bulky numpy arrays.  Copy the nested dicts
        # instead of popping from the shared ones: a shallow copy + pop
        # would mutate the correlation dict we return to the caller.
        report = dict(correlation)
        report['cpu_statistics'] = {
            k: v for k, v in correlation['cpu_statistics'].items()
            if k != 'trace_means'
        }
        report['intermediate_statistics'] = {
            k: v for k, v in correlation['intermediate_statistics'].items()
            if k != 'values'
        }
        report['metadata'] = self.metadata

        report_path = output_dir / 'correlation_report.json'
        with open(report_path, 'w') as f:
            json.dump(report, f, indent=2)

        print(f"\n相关性Analyze报告已Save: {report_path}")

        # Figures.
        scatter_path = output_dir / 'scatter_plot.png'
        self.plot_scatter(str(scatter_path))

        hist_path = output_dir / 'histogram.png'
        self.plot_histogram(str(hist_path))

        # Console summary.
        print("\n" + "="*70)
        print("相关性AnalyzeResult摘要")
        print("="*70)
        print(f"Algorithm: {self.metadata.get('algorithm_name', 'Unknown')}")
        print(f"模型: {self.metadata.get('model_name', 'Unknown')}")
        print(f"Trace数: {len(self.traces)}")
        print(f"\nPearson相关系数: {correlation['pearson_coefficient']:.4f}")
        print(f"P值: {correlation['pearson_p_value']:.6f}")
        print(f"相关性评价: {correlation['description']}")
        print(f"\nCPUUse率统计:")
        print(f"  平均值: {correlation['cpu_statistics']['mean']:.2f}%")
        print(f"  标准差: {correlation['cpu_statistics']['std']:.2f}%")
        print(f"\nIntermediate value统计:")
        print(f"  唯一值数量: {correlation['intermediate_statistics']['unique_count']}")
        print(f"  值范围: [{correlation['intermediate_statistics']['min']}, "
              f"{correlation['intermediate_statistics']['max']}]")
        print("="*70)

        return correlation


class ComparisonAnalyzer:
    """Compare correlation results across multiple experiments.

    Runs a CorrelationAnalyzer over each trace file and aggregates the
    per-model correlation coefficients into a comparison plot and report.
    """

    def __init__(self, trace_files: List[str]):
        """
        Initialize the comparison analyzer.

        Args:
            trace_files: list of trace file paths (.npz)
        """
        self.trace_files = [Path(f) for f in trace_files]
        self.analyzers = []
        self.correlations = []

        # One analyzer (and one correlation result) per trace file,
        # kept in the same order as trace_files.
        for trace_file in self.trace_files:
            analyzer = CorrelationAnalyzer(str(trace_file))
            correlation = analyzer.compute_correlation()
            self.analyzers.append(analyzer)
            self.correlations.append(correlation)

    def plot_comparison(self, output_path: str = None):
        """Plot |Pearson r| per model as a sorted horizontal bar chart.

        Args:
            output_path: if given, save the figure there; otherwise show it.
        """
        names = []
        pearson_corrs = []

        for analyzer, corr in zip(self.analyzers, self.correlations):
            model_name = analyzer.metadata.get('model_name', 'Unknown')
            names.append(model_name)
            pearson_corrs.append(abs(corr['pearson_coefficient']))

        # Sort models by |r|, strongest first.
        sorted_indices = np.argsort(pearson_corrs)[::-1]
        names = [names[i] for i in sorted_indices]
        pearson_corrs = [pearson_corrs[i] for i in sorted_indices]

        # Color-code by correlation strength (thresholds match the
        # reference lines below).
        colors = ['green' if c > 0.5 else 'orange' if c > 0.3 else 'red' for c in pearson_corrs]

        plt.figure(figsize=(12, 6))
        plt.barh(range(len(names)), pearson_corrs, color=colors, alpha=0.7)
        plt.yticks(range(len(names)), names)
        plt.xlabel('|Pearson Correlation Coefficient|')
        plt.title('Model Correlation Comparison')
        plt.grid(True, alpha=0.3, axis='x')

        # Reference thresholds.
        plt.axvline(0.5, color='green', linestyle='--', alpha=0.5, label='Strong (|r|>0.5)')
        plt.axvline(0.3, color='orange', linestyle='--', alpha=0.5, label='Moderate (|r|>0.3)')
        plt.legend()

        # Numeric labels next to each bar.
        for i, (name, corr) in enumerate(zip(names, pearson_corrs)):
            plt.text(corr + 0.01, i, f'{corr:.3f}', va='center')

        plt.tight_layout()

        if output_path:
            plt.savefig(output_path, dpi=150, bbox_inches='tight')
            print(f"Comparison plot saved: {output_path}")
        else:
            plt.show()

        plt.close()

    def generate_comparison_report(self, output_dir: str = None):
        """Write the comparison plot and JSON report, then print a summary.

        Args:
            output_dir: directory for the report files (default: cwd).
        """
        if output_dir:
            output_dir = Path(output_dir)
            output_dir.mkdir(parents=True, exist_ok=True)
        else:
            output_dir = Path('.')

        # Comparison figure.
        comparison_path = output_dir / 'comparison_plot.png'
        self.plot_comparison(str(comparison_path))

        # Per-model comparison records.
        comparison_data = []
        for analyzer, corr in zip(self.analyzers, self.correlations):
            comparison_data.append({
                'model_name': analyzer.metadata.get('model_name', 'Unknown'),
                # CorrelationAnalyzer.generate_report reads 'algorithm_name';
                # prefer that key here too, falling back to the legacy
                # 'algorithm' key so older metadata files still work.
                'algorithm': analyzer.metadata.get(
                    'algorithm_name', analyzer.metadata.get('algorithm', 'Unknown')),
                'pearson_coefficient': corr['pearson_coefficient'],
                'significance': corr['significance'],
                'description': corr['description']
            })

        report_path = output_dir / 'comparison_report.json'
        with open(report_path, 'w') as f:
            json.dump(comparison_data, f, indent=2)

        print(f"对比报告已Save: {report_path}")

        # Console summary, strongest correlation first.
        print("\n" + "="*70)
        print("模型对比AnalyzeResult")
        print("="*70)
        for data in sorted(comparison_data, key=lambda x: abs(x['pearson_coefficient']), reverse=True):
            print(f"{data['model_name']:<30} r={data['pearson_coefficient']:>7.4f}  {data['description']}")
        print("="*70)


def main():
    """Command-line entry point.

    --compare runs a multi-experiment comparison; --input analyzes one
    trace file; with neither, the argparse help text is printed.
    """
    import argparse

    parser = argparse.ArgumentParser(description='Intermediate value模型相关性Analyze器')
    parser.add_argument('--input', '-i', type=str, help='InputTraceFile (.npz)')
    parser.add_argument('--output', '-o', type=str, help='Output directory')
    parser.add_argument('--compare', '-c', nargs='+', help='对比MultipleTraceFile')
    args = parser.parse_args()

    # Comparison mode takes precedence over single-file mode.
    if args.compare:
        print(f"对比Analyze {len(args.compare)} 个实验...")
        ComparisonAnalyzer(args.compare).generate_comparison_report(args.output)
        return

    if args.input:
        print(f"AnalyzeTraceFile: {args.input}")
        CorrelationAnalyzer(args.input).generate_report(args.output)
        return

    parser.print_help()


if __name__ == '__main__':
    main()
