#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
SPA (Simple Power Analysis) SimplePower AnalysisAttack
"""

import numpy as np
import matplotlib.pyplot as plt
import time
from typing import Dict, Any, List, Tuple
try:
    from .base_side_channel_attack import BaseSideChannelAttack, AttackResult
except ImportError:
    from base_side_channel_attack import BaseSideChannelAttack, AttackResult


class SPAAttack(BaseSideChannelAttack):
    """Simple Power Analysis (SPA) attack.

    Searches the captured power traces for directly observable structure:
    peaks/valleys in the mean trace, traces that deviate strongly from the
    mean, and per-trace change-rate / frequency features.  From those
    observations a heuristic confidence score and a ranked list of key-byte
    candidates are derived.
    """

    def __init__(self):
        super().__init__("SPA")
        # Pattern-match threshold; currently only echoed in result metadata.
        self.pattern_threshold = 0.8

    def attack(self, target_byte: int = 0, **kwargs) -> AttackResult:
        """Execute the SPA attack.

        Args:
            target_byte: Index of the targeted key byte (echoed in the
                result; the analysis itself operates on whole traces).
            **kwargs: Supports 'top_n' -- maximum number of candidates to
                keep, defaulting to min(256, self.hypothesis_range).

        Returns:
            AttackResult whose metadata contains the detected patterns,
            anomalies, features and recovered key information.
        """
        start_time = time.time()

        if not self.validate_data():
            return AttackResult(
                attack_type="SPA",
                target_byte=target_byte,
                execution_time=0,
                success=False,
                confidence=0.0
            )

        try:
            # Analyze the mean power trace for significant peaks/valleys.
            patterns = self._analyze_power_patterns()

            # Detect traces that deviate abnormally from the mean trace.
            anomalies = self._detect_anomalies()

            # Extract per-trace change-rate and frequency-domain features.
            key_features = self._extract_key_features()

            # Try to recover key information from the observations.
            top_n = kwargs.get('top_n', min(256, self.hypothesis_range))
            recovered_info = self._recover_key_information(patterns, anomalies, key_features, top_n)

            execution_time = time.time() - start_time

            # SPA typically needs only a low threshold to count as success.
            confidence = self._calculate_confidence(patterns, anomalies)
            success = confidence > 0.3

            # Candidate list produced by the recovery step.
            top_candidates = recovered_info.get('candidates', [])

            self.logger.info("SPA Attack completed:")
            self.logger.info(f"  - Patterns detected: {len(patterns)}")
            self.logger.info(f"  - Anomalies found: {len(anomalies)}")
            self.logger.info(f"  - Key features: {len(key_features)}")
            self.logger.info(f"  - Confidence: {confidence:.4f}")
            self.logger.info(f"  - Top candidates saved: {len(top_candidates)}")

            return AttackResult(
                attack_type="SPA",
                target_byte=target_byte,
                execution_time=execution_time,
                success=success,
                confidence=confidence,
                best_hypothesis=recovered_info.get('best_guess'),
                top_candidates=top_candidates,
                metadata={
                    'patterns': patterns,
                    'anomalies': anomalies,
                    'key_features': key_features,
                    'recovered_info': recovered_info,
                    'pattern_threshold': self.pattern_threshold
                }
            )

        except Exception as e:
            # Top-level boundary: log and return a failed result, never raise.
            self.logger.error(f"SPA attack failed: {e}")
            return AttackResult(
                attack_type="SPA",
                target_byte=target_byte,
                execution_time=time.time() - start_time,
                success=False,
                confidence=0.0
            )

    def _analyze_power_patterns(self) -> List[Dict[str, Any]]:
        """Detect significant peaks and valleys in the mean power trace.

        Bug fix: the original code compared ``mean_trace`` element-wise
        against ``mean_trace + 2 * std_trace`` (and the valley analogue), a
        condition that can never hold, so no pattern was ever detected.  The
        thresholds are now scalar statistics of the mean trace itself.

        Returns:
            List of pattern dicts with keys 'id', 'position', 'amplitude',
            'width' and 'type' ('peak' or 'valley').
        """
        patterns: List[Dict[str, Any]] = []

        mean_trace = np.mean(self.traces, axis=0)
        baseline = float(np.mean(mean_trace))
        spread = float(np.std(mean_trace))

        # Significant power peaks: samples well above the trace baseline.
        peaks = np.where(mean_trace > baseline + 2 * spread)[0]

        # No peak found -- retry with a looser threshold.
        if len(peaks) == 0:
            peaks = np.where(mean_trace > baseline + spread)[0]

        for i, peak in enumerate(peaks):
            patterns.append({
                'id': i,
                'position': peak,
                'amplitude': mean_trace[peak],
                'width': self._calculate_peak_width(mean_trace, peak),
                'type': 'peak'
            })

        # Significant power valleys: samples well below the baseline.
        valleys = np.where(mean_trace < baseline - 2 * spread)[0]

        # Bug fix: the id was computed as len(patterns) + i *inside* the
        # append loop, producing non-consecutive ids; hoist the offset.
        offset = len(patterns)
        for i, valley in enumerate(valleys):
            patterns.append({
                'id': offset + i,
                'position': valley,
                'amplitude': mean_trace[valley],
                # Negate the trace so the valley looks like a peak to the
                # width estimator.
                'width': self._calculate_peak_width(-mean_trace, valley),
                'type': 'valley'
            })

        return patterns

    def _calculate_peak_width(self, trace: np.ndarray, peak_pos: int) -> int:
        """Estimate a peak's width as the distance between the half-maximum
        crossings to the left and right of ``peak_pos``.

        Returns:
            Width in samples; 0 for an out-of-range position.
        """
        # Robustness: also reject negative positions (the original only
        # checked the upper bound).
        if peak_pos < 0 or peak_pos >= len(trace):
            return 0

        peak_value = trace[peak_pos]
        half_max = peak_value / 2

        # Walk left to the half-maximum crossing.
        left_pos = peak_pos
        while left_pos > 0 and trace[left_pos] > half_max:
            left_pos -= 1

        # Walk right to the half-maximum crossing.
        right_pos = peak_pos
        while right_pos < len(trace) - 1 and trace[right_pos] > half_max:
            right_pos += 1

        return right_pos - left_pos

    def _detect_anomalies(self) -> List[Dict[str, Any]]:
        """Flag traces whose Euclidean distance to the mean trace exceeds
        twice the standard deviation of all such distances.

        Returns:
            List of anomaly dicts with keys 'trace_id', 'distance',
            'max_deviation_point' and 'max_deviation_value'.
        """
        anomalies = []

        traces = np.asarray(self.traces)
        mean_trace = np.mean(traces, axis=0)

        # Performance fix: the original recomputed the full distance
        # distribution inside the per-trace loop (O(n^2) norm evaluations);
        # compute all distances and the threshold once.
        deviations = traces - mean_trace
        distances = np.linalg.norm(deviations, axis=1)
        threshold = 2 * np.std(distances)

        for i, distance in enumerate(distances):
            if distance > threshold:
                abs_dev = np.abs(deviations[i])
                anomalies.append({
                    'trace_id': i,
                    'distance': distance,
                    'max_deviation_point': np.argmax(abs_dev),
                    'max_deviation_value': np.max(abs_dev)
                })

        return anomalies

    def _extract_key_features(self) -> List[Dict[str, Any]]:
        """Extract per-trace features: the maximum power change rate and the
        dominant (non-DC) frequency component.

        NOTE(review): assumes each trace has at least 4 samples so the
        diff/FFT slices are non-empty -- confirm against the capture setup.

        Returns:
            List of feature dicts; all 'change_rate' features come first,
            followed by all 'frequency' features.
        """
        features = []

        # Feature 1: power change rate (first-difference extrema).
        for i, trace in enumerate(self.traces):
            abs_diff = np.abs(np.diff(trace))
            features.append({
                'trace_id': i,
                'type': 'change_rate',
                'max_rate': np.max(abs_diff),
                'position': np.argmax(abs_diff)
            })

        # Feature 2: dominant non-DC bin of the first half-spectrum.
        for i, trace in enumerate(self.traces):
            fft = np.fft.fft(trace)
            # +1 compensates for skipping the DC bin in the argmax slice.
            dominant_freq = np.argmax(np.abs(fft[1:len(fft)//2])) + 1
            features.append({
                'trace_id': i,
                'type': 'frequency',
                'dominant_frequency': dominant_freq,
                'amplitude': np.abs(fft[dominant_freq])
            })

        return features

    def _recover_key_information(self, patterns: List[Dict], anomalies: List[Dict], 
                               features: List[Dict], top_n: int = 10) -> Dict[str, Any]:
        """Heuristically recover key information from the SPA observations.

        Args:
            patterns: Peak/valley patterns from _analyze_power_patterns.
            anomalies: Anomalous traces from _detect_anomalies.
            features: Per-trace features (currently unused; kept for
                interface stability and future scoring).
            top_n: Maximum number of candidates to return.

        Returns:
            Dict with optional keys 'suspected_key_operation_position',
            'suspected_key_bytes' and 'best_guess', and always 'candidates'
            (a list of (hypothesis, score) tuples).
        """
        recovered_info: Dict[str, Any] = {}

        # Pattern-based recovery: the most frequent pattern position is
        # taken as the suspected key-operation location.
        if patterns:
            pattern_positions = [p['position'] for p in patterns]
            if pattern_positions:
                most_common_pos = max(set(pattern_positions), key=pattern_positions.count)
                recovered_info['suspected_key_operation_position'] = most_common_pos

        # Anomaly-based recovery: anomalous traces may correspond to
        # specific key-dependent data values.
        if anomalies:
            anomaly_traces = [a['trace_id'] for a in anomalies]
            if anomaly_traces and hasattr(self, 'plaintexts'):
                target_data = self.get_target_data(0)
                suspected_values = [target_data[i] for i in anomaly_traces if i < len(target_data)]
                if suspected_values:
                    recovered_info['suspected_key_bytes'] = list(set(suspected_values))

        # Statistical candidate list: rank target-data values by frequency.
        candidates = []
        if hasattr(self, 'plaintexts'):
            target_data = self.get_target_data(0)
            # Count occurrences of every observed value.
            unique, counts = np.unique(target_data, return_counts=True)

            # Descending frequency order; normalized frequency is the score.
            sorted_indices = np.argsort(counts)[::-1]
            for idx in sorted_indices[:min(top_n, len(unique))]:
                hypothesis = int(unique[idx])
                score = float(counts[idx]) / len(target_data)
                candidates.append((hypothesis, score))

            # The best guess is the most frequent value.
            if candidates:
                recovered_info['best_guess'] = candidates[0][0]

        recovered_info['candidates'] = candidates
        return recovered_info

    def _calculate_confidence(self, patterns: List[Dict], anomalies: List[Dict]) -> float:
        """Combine pattern count, anomaly ratio and trace variability into a
        heuristic confidence value clamped to [0, 1]."""
        confidence = 0.0

        # Pattern count contributes up to 0.3.
        if patterns:
            confidence += min(len(patterns) / 10.0, 0.3)

        # Anomaly ratio contributes up to 0.4.
        if anomalies:
            anomaly_ratio = len(anomalies) / len(self.traces)
            confidence += min(anomaly_ratio * 2, 0.4)

        # Trace variability contributes up to 0.3.
        trace_std = np.std(self.traces, axis=0)
        variability = np.mean(trace_std)
        mean_traces = np.mean(self.traces)
        if mean_traces != 0:
            # Bug fix: use the magnitude of the mean power level; a negative
            # mean previously produced a negative contribution.
            confidence += min(variability / abs(mean_traces), 0.3)
        else:
            confidence += 0.1  # small default contribution when mean is zero

        return min(confidence, 1.0)

    def plot_results(self, result: AttackResult) -> None:
        """Plot SPA attack results: patterns on the mean trace, anomalous
        traces, the mean change rate, and pattern-type statistics."""
        if not result.success:
            self.logger.warning("No valid results to plot")
            return

        patterns = result.metadata.get('patterns', [])
        anomalies = result.metadata.get('anomalies', [])

        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 10))

        # Mean power trace with the detected patterns marked.
        mean_trace = np.mean(self.traces, axis=0)
        ax1.plot(mean_trace, 'b-', linewidth=1, label='Mean Trace')

        for pattern in patterns:
            color = 'red' if pattern['type'] == 'peak' else 'green'
            ax1.plot(pattern['position'], pattern['amplitude'], 'o', 
                    color=color, markersize=8, alpha=0.7)

        ax1.set_title('SPA Analysis - Power Trace with Detected Patterns')
        ax1.set_xlabel('Sample Points')
        ax1.set_ylabel('Power Consumption')
        ax1.legend()
        ax1.grid(True, alpha=0.3)

        # Anomalous traces overlaid on the mean trace.
        ax2.plot(mean_trace, 'b-', linewidth=2, label='Mean Trace', alpha=0.7)

        for anomaly in anomalies[:5]:  # show at most the first 5 anomalies
            trace_id = anomaly['trace_id']
            if trace_id < len(self.traces):
                ax2.plot(self.traces[trace_id], 'r-', alpha=0.5, linewidth=1)

        ax2.set_title('SPA Analysis - Anomalous Traces')
        ax2.set_xlabel('Sample Points')
        ax2.set_ylabel('Power Consumption')
        ax2.legend()
        ax2.grid(True, alpha=0.3)

        # Mean power change rate.
        diff_traces = np.diff(self.traces, axis=1)
        mean_diff = np.mean(diff_traces, axis=0)
        ax3.plot(mean_diff, 'g-', linewidth=2)
        ax3.set_title('SPA Analysis - Power Change Rate')
        ax3.set_xlabel('Sample Points')
        ax3.set_ylabel('Change Rate')
        ax3.grid(True, alpha=0.3)

        # Pattern-type statistics.
        if patterns:
            pattern_types = [p['type'] for p in patterns]
            unique_types, counts = np.unique(pattern_types, return_counts=True)
            ax4.bar(unique_types, counts, color=['red', 'green'])
            ax4.set_title('SPA Analysis - Pattern Distribution')
            ax4.set_xlabel('Pattern Type')
            ax4.set_ylabel('Count')
        else:
            ax4.text(0.5, 0.5, 'No patterns detected', ha='center', va='center', 
                    transform=ax4.transAxes, fontsize=12)
            ax4.set_title('SPA Analysis - No Patterns Found')

        plt.tight_layout()
        plt.show()
