#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Template Attack Template Attack
"""

import numpy as np
import matplotlib.pyplot as plt
import time
from typing import Dict, Any, List, Tuple
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.mixture import GaussianMixture
try:
    from .base_side_channel_attack import TemplateBasedAttack, AttackResult
except ImportError:
    from base_side_channel_attack import TemplateBasedAttack, AttackResult


class TemplateAttack(TemplateBasedAttack):
    """Profiling-based template attack.

    During a profiling phase, builds one multivariate-Gaussian template
    (mean vector + covariance matrix) per leakage class, optionally after
    variance-based feature selection and LDA dimensionality reduction.
    During the attack phase, each trace is scored against every key
    hypothesis by maximum likelihood under the matching template.
    """

    def __init__(self, intermediate_value_func=None, leakage_model_func=None):
        """
        Initialize the template attack.

        Args:
            intermediate_value_func: callback computing the targeted
                intermediate value, signature
                func(plaintext, key, hypothesis) -> intermediate_value.
                During profiling it is called with the known key and
                hypothesis=None; during the attack with key=None and a
                concrete hypothesis.
            leakage_model_func: leakage model callback, signature
                func(intermediate_value) -> leakage_value. Defaults to the
                Hamming-weight model.
        """
        super().__init__("Template")
        self.profiling_ratio = 0.7  # fraction of traces used for profiling
        self.lda = None  # fitted LinearDiscriminantAnalysis, set during profiling
        self.hypothesis_range = 256  # number of key hypotheses (one byte)

        # Numerical-stability / tuning parameters.
        self.min_samples_per_class = 10  # minimum samples per leakage class
        self.max_lda_components = 20     # cap on LDA output dimensions
        self.regularization_factor = 1e-6  # diagonal regularization for covariances
        self.feature_selection_ratio = 0.8  # fraction of features kept by variance selection

        # Callback interfaces.
        self.intermediate_value_func = intermediate_value_func
        self.leakage_model_func = leakage_model_func or self._default_leakage_model

    def set_intermediate_value_function(self, func):
        """Set the intermediate-value callback (see __init__ for signature)."""
        self.intermediate_value_func = func

    def set_leakage_model_function(self, func):
        """Set the leakage-model callback (see __init__ for signature)."""
        self.leakage_model_func = func

    def _default_leakage_model(self, intermediate_value):
        """Default leakage model: Hamming weight of the intermediate value.

        Accepts a single integer or a sequence of integers (summed weights).
        Raises ValueError for any other type.
        """
        if isinstance(intermediate_value, (int, np.integer)):
            return bin(intermediate_value).count('1')
        elif isinstance(intermediate_value, (list, np.ndarray)):
            return sum(bin(val).count('1') for val in intermediate_value)
        else:
            raise ValueError(f"Unsupported intermediate value type: {type(intermediate_value)}")

    def _safe_covariance(self, traces):
        """Compute a well-conditioned covariance matrix for one leakage class.

        Uses the Ledoit-Wolf shrinkage estimator plus diagonal
        regularization; falls back to a scaled identity matrix when the
        class has too few samples or the computation fails.
        """
        if len(traces) < self.min_samples_per_class:
            # Too few samples for a meaningful estimate: return a (regularized)
            # identity matrix instead.
            return np.eye(traces.shape[1]) * self.regularization_factor

        try:
            # Plain sample covariance (kept for reference; the shrunk estimate
            # below is what is actually used).
            cov = np.cov(traces.T)

            # Ledoit-Wolf shrinkage improves the estimate when the number of
            # samples is small relative to the dimensionality.
            from sklearn.covariance import LedoitWolf
            lw = LedoitWolf()
            cov_shrunk = lw.fit(traces).covariance_

            # Diagonal regularization to ensure positive definiteness.
            regularization = np.eye(traces.shape[1]) * self.regularization_factor
            cov_regularized = cov_shrunk + regularization

            # If the condition number is still huge, add more regularization.
            condition_number = np.linalg.cond(cov_regularized)
            if condition_number > 1e12:
                additional_reg = np.eye(traces.shape[1]) * (self.regularization_factor * 10)
                cov_regularized += additional_reg
                self.logger.warning(f"协方差Matrix条件数过大 ({condition_number:.2e})，增加正则化")

            return cov_regularized

        except Exception as e:
            self.logger.warning(f"协方差CalculateFailed: {e}，Use单位Matrix")
            return np.eye(traces.shape[1]) * self.regularization_factor

    def attack(self, target_byte: int = 0, num_templates: int = None, **kwargs) -> AttackResult:
        """
        Execute the template attack.

        Args:
            target_byte: index of the targeted key byte.
            num_templates: optional override for the number of templates.
            **kwargs: extra options; 'top_n' limits the number of key
                candidates stored in the result (default: hypothesis_range).

        Returns:
            AttackResult with success flag, confidence, best hypothesis,
            ranked candidates and diagnostic metadata.
        """
        start_time = time.time()

        if num_templates is not None:
            self.num_templates = num_templates

        if not self.validate_data():
            return AttackResult(
                attack_type="Template",
                target_byte=target_byte,
                execution_time=0,
                success=False,
                confidence=0.0
            )

        try:
            # Split the data into a profiling set and an attack set.
            profiling_data, attack_data = self._split_data()

            # Profiling phase: build the templates.
            templates = self._create_templates_from_data(profiling_data, target_byte)

            # Attack phase: classify attack traces with the templates.
            predictions, probabilities = self._classify_with_templates(
                templates, attack_data, target_byte
            )

            # Evaluate the attack outcome.
            success_rate = self._evaluate_predictions(predictions, attack_data, target_byte)

            # Average probability per key hypothesis across all attack traces.
            avg_probabilities = np.mean(probabilities, axis=0)

            # Most likely key hypothesis.
            best_hypothesis = int(np.argmax(avg_probabilities))

            # Top-n ranked candidates.
            top_n = kwargs.get('top_n', min(256, self.hypothesis_range))
            sorted_indices = np.argsort(avg_probabilities)[::-1]  # descending order
            top_candidates = [
                (int(idx), float(avg_probabilities[idx]))
                for idx in sorted_indices[:top_n]
            ]

            execution_time = time.time() - start_time

            # Confidence is the measured success rate; template attacks are
            # expected to achieve a relatively high rate to count as success.
            confidence = success_rate
            success = confidence > 0.5

            self.logger.info(f"Template Attack completed:")
            self.logger.info(f"  - Templates created: {len(templates)}")
            self.logger.info(f"  - Success rate: {success_rate:.4f}")
            if isinstance(best_hypothesis, (int, np.integer)):
                self.logger.info(f"  - Best hypothesis: 0x{best_hypothesis:02x}")
            else:
                self.logger.info(f"  - Best hypothesis: {best_hypothesis}")
            self.logger.info(f"  - Confidence: {confidence:.4f}")
            self.logger.info(f"  - Top candidates saved: {len(top_candidates)}")

            return AttackResult(
                attack_type="Template",
                target_byte=target_byte,
                execution_time=execution_time,
                success=success,
                confidence=confidence,
                best_hypothesis=best_hypothesis,
                top_candidates=top_candidates,
                metadata={
                    'templates': templates,
                    'predictions': predictions,
                    'probabilities': probabilities,
                    'success_rate': success_rate,
                    'profiling_size': len(profiling_data['traces']),
                    'attack_size': len(attack_data['traces']),
                    'avg_probabilities': avg_probabilities
                }
            )

        except Exception as e:
            self.logger.error(f"Template attack failed: {e}")
            return AttackResult(
                attack_type="Template",
                target_byte=target_byte,
                execution_time=time.time() - start_time,
                success=False,
                confidence=0.0
            )

    def _split_data(self) -> Tuple[Dict[str, np.ndarray], Dict[str, np.ndarray]]:
        """Split the traces into profiling and attack sets.

        Proper template-attack splitting:
        - profiling phase: traces with known keys are used to build templates;
        - attack phase: traces (ideally with different keys) are attacked.

        When every trace uses the same key, a random split is used for
        cross-validation instead. NOTE(review): the random split is not
        seeded, so results are not reproducible across runs — confirm
        whether that is intended.
        """
        if self.keys is None:
            raise ValueError("Template attack requires keys for proper data splitting")

        n_traces = len(self.traces)
        split_point = int(n_traces * self.profiling_ratio)

        # Detect whether every trace uses the same key.
        unique_keys = np.unique(self.keys.reshape(len(self.keys), -1), axis=0)

        if len(unique_keys) == 1:
            # Single key across all traces — random split for cross-validation.
            indices = np.random.permutation(n_traces)
            profiling_indices = indices[:split_point]
            attack_indices = indices[split_point:]

            self.logger.warning("All traces use the same key - using random split for cross-validation")
        else:
            # Multiple keys — profile on the leading traces, attack the rest.
            profiling_indices = np.arange(split_point)
            attack_indices = np.arange(split_point, n_traces)

        profiling_data = {
            'traces': self.traces[profiling_indices],
            'plaintexts': self.plaintexts[profiling_indices],
            'keys': self.keys[profiling_indices]
        }

        attack_data = {
            'traces': self.traces[attack_indices],
            'plaintexts': self.plaintexts[attack_indices],
            'keys': self.keys[attack_indices] if len(attack_indices) > 0 else None
        }

        return profiling_data, attack_data

    def _create_templates_from_data(self, profiling_data: Dict[str, np.ndarray],
                                  target_byte: int) -> Dict[int, Dict[str, Any]]:
        """Build Gaussian templates from the profiling data.

        Returns a dict mapping each leakage value to a template with keys
        'mean', 'cov' and 'size'.
        """
        traces = profiling_data['traces']
        plaintexts = profiling_data['plaintexts']
        keys = profiling_data['keys']

        if keys is None:
            raise ValueError("Template attack requires known keys for profiling phase")

        if self.intermediate_value_func is None:
            raise ValueError("Intermediate value function not set. Use set_intermediate_value_function() first.")

        # Extract the targeted byte from plaintexts and keys.
        plaintext_bytes = self._get_target_data_from_array(plaintexts, target_byte)
        key_bytes = self._get_target_data_from_array(keys, target_byte)

        # Compute intermediate values via the callback; during profiling the
        # hypothesis argument is unused, so None is passed.
        intermediate_values = []
        for pt, k in zip(plaintext_bytes, key_bytes):
            intermediate_val = self.intermediate_value_func(pt, k, None)
            intermediate_values.append(intermediate_val)
        intermediate_values = np.array(intermediate_values)

        # Map intermediate values to leakage values via the leakage model.
        leakage_values = []
        for iv in intermediate_values:
            leakage_val = self.leakage_model_func(iv)
            leakage_values.append(leakage_val)
        leakage_values = np.array(leakage_values)

        # Feature selection: keep the highest-variance samples (only when the
        # trace has many features).
        if traces.shape[1] > 100:
            n_original_features = traces.shape[1]
            feature_variances = np.var(traces, axis=0)
            n_features_to_select = int(traces.shape[1] * self.feature_selection_ratio)
            selected_features = np.argsort(feature_variances)[-n_features_to_select:]
            traces = traces[:, selected_features]
            self.selected_features = selected_features
            # Bug fix: report the ORIGINAL feature count — the previous code
            # logged traces.shape[1] after the reduction had already happened.
            self.logger.info(f"特征选择：From {n_original_features} 个特征中选择了 {n_features_to_select} 个")

        # Dimensionality reduction with LDA.
        unique_leakage_values = np.unique(leakage_values)
        n_classes = len(unique_leakage_values)

        # Check the per-class sample counts.
        class_counts = {val: np.sum(leakage_values == val) for val in unique_leakage_values}
        min_samples = min(class_counts.values())

        if min_samples < self.min_samples_per_class:
            self.logger.warning(f"某些Class别样本数量Not足 (最少: {min_samples})，可能影响模板质量")

        # Choose the LDA component count dynamically, bounded by the feature
        # count, the class count and the smallest class size.
        max_components = min(
            self.max_lda_components,
            traces.shape[1] - 1,
            max(1, n_classes - 1),
            max(1, min_samples - 1)  # at least 1, never above the smallest class size
        )

        # Guarantee at least one component.
        max_components = max(1, max_components)

        self.lda = LinearDiscriminantAnalysis(
            n_components=max_components,
            solver='svd',  # SVD solver for numerical stability
            shrinkage=None  # shrinkage is handled in _safe_covariance instead
        )

        try:
            traces_lda = self.lda.fit_transform(traces, leakage_values)
            self.logger.info(f"LDA降维：{traces.shape[1]} → {traces_lda.shape[1]} 维")
        except Exception as e:
            self.logger.error(f"LDA降维Failed: {e}，Use原始特征")
            traces_lda = traces

        # Build one template per observed leakage value.
        templates = {}

        for leakage_val in unique_leakage_values:
            mask = (leakage_values == leakage_val)
            if np.sum(mask) > 0:
                group_traces = traces_lda[mask]
                templates[leakage_val] = {
                    'mean': np.mean(group_traces, axis=0),
                    'cov': self._safe_covariance(group_traces),
                    'size': len(group_traces)
                }

        return templates

    def _get_target_data_from_array(self, plaintexts: np.ndarray, target_byte: int) -> np.ndarray:
        """Extract the targeted byte column from a plaintext/key array.

        Raises ValueError when target_byte is out of range for the data width.
        """
        plaintext_ints = self.convert_bytes_to_int(plaintexts)

        if len(plaintext_ints.shape) > 1:
            if target_byte >= plaintext_ints.shape[1]:
                raise ValueError(f"Target byte {target_byte} exceeds data width {plaintext_ints.shape[1]}")
            return plaintext_ints[:, target_byte]
        else:
            if target_byte > 0:
                raise ValueError(f"Target byte {target_byte} not available in single-byte data")
            return plaintext_ints

    def _classify_with_templates(self, templates: Dict[int, Dict[str, Any]],
                               attack_data: Dict[str, np.ndarray],
                               target_byte: int) -> Tuple[np.ndarray, np.ndarray]:
        """Score every attack trace against all key hypotheses.

        Returns (predictions, probabilities) where predictions[i] is the
        most likely hypothesis for trace i and probabilities is an
        (n_traces, hypothesis_range) array of normalized probabilities.
        """
        traces = attack_data['traces']
        plaintexts = attack_data['plaintexts']

        if self.intermediate_value_func is None:
            raise ValueError("Intermediate value function not set. Use set_intermediate_value_function() first.")

        # Apply the same feature selection used during profiling, if any.
        if hasattr(self, 'selected_features'):
            traces = traces[:, self.selected_features]

        # Project the traces with the trained LDA.
        try:
            traces_lda = self.lda.transform(traces)
        except Exception as e:
            self.logger.error(f"LDA变换Failed: {e}，Use原始特征")
            traces_lda = traces

        # Extract the targeted plaintext byte.
        plaintext_bytes = self._get_target_data_from_array(plaintexts, target_byte)

        predictions = []
        probabilities = np.zeros((len(traces), self.hypothesis_range))

        for i, trace in enumerate(traces_lda):
            log_probabilities = []

            # Score each hypothesis in log space to avoid numerical underflow.
            for hypothesis in range(self.hypothesis_range):
                # Intermediate value under this hypothesis (key unknown → None).
                intermediate_val = self.intermediate_value_func(plaintext_bytes[i], None, hypothesis)

                # Corresponding leakage value.
                leakage_val = self.leakage_model_func(intermediate_val)

                # Log-likelihood under the matching template; a missing
                # template gets a small constant probability.
                if leakage_val in templates:
                    template = templates[leakage_val]
                    log_prob = self._multivariate_normal_log_pdf(trace, template['mean'], template['cov'])
                else:
                    log_prob = -23.0  # log(1e-10)

                log_probabilities.append(log_prob)

            # Convert back to probabilities and normalize.
            log_probabilities = np.array(log_probabilities)

            # Numerically stable softmax.
            max_log_prob = np.max(log_probabilities)
            exp_probs = np.exp(log_probabilities - max_log_prob)
            trace_probabilities = exp_probs / np.sum(exp_probs)

            probabilities[i] = trace_probabilities

            # Predict the most likely hypothesis for this trace.
            predictions.append(np.argmax(trace_probabilities))

        return np.array(predictions), probabilities

    def _multivariate_normal_log_pdf(self, x: np.ndarray, mean: np.ndarray, cov: np.ndarray) -> float:
        """Log-density of a multivariate normal (numerically stable).

        Uses a Cholesky factorization when possible; falls back to the
        pseudo-inverse for singular covariances. The result is clamped to
        [-50, inf) and -50 is returned on any failure.
        """
        try:
            diff = x - mean

            # Cholesky path: solve instead of inverting, and get log|cov| from
            # the factor's diagonal.
            try:
                L = np.linalg.cholesky(cov)
                # Solve L * y = diff.
                y = np.linalg.solve(L, diff)
                # Quadratic form diff^T cov^{-1} diff = y^T y.
                quadratic_form = np.dot(y, y)
                # log|cov| = 2 * sum(log(diag(L))).
                log_det = 2.0 * np.sum(np.log(np.diag(L)))
            except np.linalg.LinAlgError:
                # Cholesky failed: fall back to the pseudo-inverse.
                cov_inv = np.linalg.pinv(cov)
                quadratic_form = np.dot(diff, np.dot(cov_inv, diff))
                sign, log_det = np.linalg.slogdet(cov)
                if sign <= 0:
                    log_det = 0  # singular matrix: neutral determinant term

            # Log-density of the multivariate normal.
            k = len(x)  # dimensionality
            log_prob = -0.5 * (k * np.log(2 * np.pi) + log_det + quadratic_form)

            # Guard against NaN/inf.
            if np.isnan(log_prob) or np.isinf(log_prob):
                return -50.0  # very small log-probability

            return max(log_prob, -50.0)  # clamp the minimum

        except Exception:
            return -50.0  # computation failed: return a very small log-probability

    def _multivariate_normal_pdf(self, x: np.ndarray, mean: np.ndarray, cov: np.ndarray) -> float:
        """Density of a multivariate normal (numerically stable).

        Computed in log space and exponentiated at the end; returns 1e-10
        for singular covariances or on failure, and saturates at 1.0 / 1e-10
        near the float64 exponent limits.
        """
        try:
            diff = x - mean

            # Pseudo-inverse avoids problems with singular matrices.
            inv_cov = np.linalg.pinv(cov)

            # Mahalanobis distance.
            mahalanobis_dist = np.dot(diff, np.dot(inv_cov, diff))

            # Work in log space to avoid underflow.
            log_prob = -0.5 * mahalanobis_dist

            # Normalization constant (in log space).
            sign, logdet = np.linalg.slogdet(cov)
            if sign <= 0:
                # Covariance is not positive definite: return a default value.
                return 1e-10

            log_normalization = -0.5 * (len(mean) * np.log(2 * np.pi) + logdet)

            # Combine and convert back from log space.
            log_total = log_prob + log_normalization

            # Guard against overflow/underflow of exp().
            if log_total > 700:  # exp(700) is near the float64 upper limit
                return 1.0
            elif log_total < -700:  # exp(-700) is effectively 0
                return 1e-10
            else:
                return np.exp(log_total)

        except Exception as e:
            # Computation failed: return a tiny probability.
            return 1e-10

    def _evaluate_predictions(self, predictions: np.ndarray,
                            attack_data: Dict[str, np.ndarray],
                            target_byte: int) -> float:
        """Evaluate predictions.

        With known keys this is plain accuracy; without them, the fraction
        of traces agreeing on the most common prediction is used as a
        consistency proxy.
        """
        if attack_data['keys'] is None:
            # No ground truth: use prediction consistency as the metric.
            unique_predictions, counts = np.unique(predictions, return_counts=True)
            max_count = np.max(counts)
            return max_count / len(predictions)
        else:
            # Ground truth available: compute accuracy.
            true_keys = self._get_target_data_from_array(attack_data['keys'], target_byte)
            correct_predictions = np.sum(predictions == true_keys)
            return correct_predictions / len(predictions)

    def _find_best_hypothesis(self, probabilities: np.ndarray) -> int:
        """Return the hypothesis with the highest average probability."""
        # Average probability per hypothesis across traces.
        avg_probabilities = np.mean(probabilities, axis=0)
        return np.argmax(avg_probabilities)

    def plot_results(self, result: AttackResult) -> None:
        """Plot template-attack results: probability heatmap, per-hypothesis
        averages, prediction distribution, and confidence vs. trace count."""
        if not result.success:
            self.logger.warning("No valid results to plot")
            return

        probabilities = result.metadata.get('probabilities')
        predictions = result.metadata.get('predictions')

        if probabilities is None or predictions is None:
            self.logger.warning("No probability data to plot")
            return

        fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 10))

        # Probability heatmap (hypotheses x traces).
        im1 = ax1.imshow(probabilities.T, aspect='auto', cmap='viridis', interpolation='nearest')
        ax1.set_title('Template Attack - Probability Heatmap')
        ax1.set_xlabel('Attack Traces')
        ax1.set_ylabel('Key Hypothesis')
        plt.colorbar(im1, ax=ax1, label='Probability')

        # Average probability per hypothesis, best one highlighted.
        avg_probabilities = np.mean(probabilities, axis=0)
        x_range = min(self.hypothesis_range, len(avg_probabilities))
        ax2.bar(range(x_range), avg_probabilities[:x_range], color='skyblue', alpha=0.7)
        best_hypothesis = result.best_hypothesis
        if isinstance(best_hypothesis, (int, np.integer)) and 0 <= best_hypothesis < x_range:
            ax2.bar(best_hypothesis, avg_probabilities[best_hypothesis],
                   color='red', alpha=0.8, label=f'Best: 0x{best_hypothesis:02x}')
        ax2.set_title('Average Probability per Key Hypothesis')
        ax2.set_xlabel('Key Hypothesis')
        ax2.set_ylabel('Average Probability')
        ax2.legend()
        ax2.grid(True, alpha=0.3)

        # Distribution of per-trace predictions.
        unique_predictions, counts = np.unique(predictions, return_counts=True)
        ax3.bar(unique_predictions, counts, color='lightgreen', alpha=0.7)
        ax3.set_title('Prediction Distribution')
        ax3.set_xlabel('Predicted Key Hypothesis')
        ax3.set_ylabel('Count')
        ax3.grid(True, alpha=0.3)

        # Confidence as a function of the number of traces used.
        cumulative_confidence = []
        for i in range(1, len(probabilities) + 1):
            partial_probs = probabilities[:i]
            partial_avg = np.mean(partial_probs, axis=0)
            confidence = np.max(partial_avg)
            cumulative_confidence.append(confidence)

        ax4.plot(range(1, len(cumulative_confidence) + 1), cumulative_confidence, 'b-', linewidth=2)
        ax4.set_title('Confidence vs Number of Traces')
        ax4.set_xlabel('Number of Traces')
        ax4.set_ylabel('Confidence')
        ax4.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

    @staticmethod
    def create_algorithm_template_attack(algorithm: str, target_operation: str = "sbox_output",
                                       leakage_model: str = "hamming_weight"):
        """
        Create an algorithm-specific template-attack instance.

        Args:
            algorithm: algorithm name (aes, sm4, rsa, ecc, etc.)
            target_operation: targeted operation (sbox_output, sbox_input, etc.)
            leakage_model: leakage model name (hamming_weight, identity, etc.)

        Returns:
            A configured TemplateAttack instance.

        Raises:
            RuntimeError: when the algorithm's model module or class cannot
                be loaded, or the requested operation is unsupported.
        """
        try:
            # Dynamically import the algorithm's model module.
            module_path = f"algorithm_specific_attacks.{algorithm}.{algorithm}_models"
            try:
                from importlib import import_module
                models_module = import_module(module_path)
            except ImportError:
                # Fall back to a relative import.
                # NOTE(review): package=__name__ passes a module name, not a
                # package name — confirm this resolves in the real layout.
                module_path = f"..algorithm_specific_attacks.{algorithm}.{algorithm}_models"
                models_module = import_module(module_path, package=__name__)

            # Locate the model class (e.g. AESLeakageModels).
            model_class_name = f"{algorithm.upper()}LeakageModels"
            if not hasattr(models_module, model_class_name):
                raise ValueError(f"找NotTo模型Class: {model_class_name}")

            model_class = getattr(models_module, model_class_name)

            # Query the available leakage models and target operations.
            available_models = model_class.get_leakage_models()
            available_operations = model_class.get_target_operations()

            if target_operation not in available_operations:
                raise ValueError(f"NotSupport的目标操作: {target_operation}，可用操作: {available_operations}")

            # Intermediate-value callback: use the known key when present
            # (profiling), otherwise the hypothesis (attack phase).
            def intermediate_value_func(plaintext, key, hypothesis):
                if target_operation == "sbox_output":
                    if key is not None:
                        return model_class.sbox_output_model(plaintext, key)
                    else:
                        return model_class.sbox_output_model(plaintext, hypothesis)
                elif target_operation == "sbox_input":
                    if key is not None:
                        return model_class.sbox_input_xor_model(plaintext, key)
                    else:
                        return model_class.sbox_input_xor_model(plaintext, hypothesis)
                elif target_operation == "round_key_xor":
                    if key is not None:
                        return model_class.round_key_xor_model(plaintext, key)
                    else:
                        return model_class.round_key_xor_model(plaintext, hypothesis)
                elif target_operation in ["first_round", "key_schedule", "mixcolumns"]:
                    # These operations are mapped onto sbox_output.
                    if key is not None:
                        return model_class.sbox_output_model(plaintext, key)
                    else:
                        return model_class.sbox_output_model(plaintext, hypothesis)
                else:
                    raise ValueError(f"未实现的目标操作: {target_operation}")

            # Leakage-model callback.
            def leakage_model_func(intermediate_value):
                if leakage_model == "hamming_weight":
                    return model_class.hamming_weight(intermediate_value)
                elif leakage_model == "identity":
                    return intermediate_value
                elif leakage_model in available_models:
                    # Use a model defined by the model class, when callable.
                    model_func = available_models[leakage_model]
                    if callable(model_func):
                        return model_func(intermediate_value)
                    else:
                        return intermediate_value
                else:
                    # Default to Hamming weight.
                    return model_class.hamming_weight(intermediate_value)

            return TemplateAttack(
                intermediate_value_func=intermediate_value_func,
                leakage_model_func=leakage_model_func
            )

        except Exception as e:
            raise RuntimeError(f"Create {algorithm} Template AttackFailed: {e}")
