"""Model evaluation utilities for TLS traffic classification."""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import (
    accuracy_score, precision_score, recall_score, f1_score,
    confusion_matrix, classification_report, roc_auc_score,
    roc_curve, precision_recall_curve
)
from typing import Dict, Tuple, Any


class ModelEvaluator:
    """Evaluate ML model performance for TLS traffic classification.

    Binary-classification convention used throughout: label 0 = Benign,
    label 1 = Malicious.
    """

    def __init__(self):
        # Metrics from the most recent evaluate_model() call.
        self.metrics = {}

    @staticmethod
    def _positive_scores(y_proba: np.ndarray) -> np.ndarray:
        """Return positive-class scores as a 1-D array.

        Accepts either an (n_samples, 2) probability matrix (the shape
        sklearn's predict_proba returns) or an already-flat (n_samples,)
        score vector, so callers are not forced into one shape.
        """
        y_proba = np.asarray(y_proba)
        return y_proba[:, 1] if y_proba.ndim == 2 else y_proba

    @staticmethod
    def _class_labels(y_true: np.ndarray, y_pred: np.ndarray) -> list:
        """Human-readable names for the classes actually present.

        Returns a single-element list when only one class appears in the
        union of y_true and y_pred (matches the 1x1 confusion-matrix case),
        otherwise ['Benign', 'Malicious'].
        """
        unique_classes = np.unique(np.concatenate([y_true, y_pred]))
        if len(unique_classes) == 1:
            return ['Benign'] if unique_classes[0] == 0 else ['Malicious']
        return ['Benign', 'Malicious']

    def evaluate_model(self, y_true: np.ndarray, y_pred: np.ndarray, 
                      y_proba: np.ndarray = None) -> Dict[str, float]:
        """Comprehensive model evaluation.

        Args:
            y_true: Ground-truth binary labels.
            y_pred: Predicted binary labels.
            y_proba: Optional scores — (n, 2) probabilities or (n,) positive
                scores. When provided and both classes occur in y_true,
                AUC-ROC is added under key 'auc_roc'.

        Returns:
            Dict of metric name -> value; also stored on self.metrics.
        """
        # zero_division=0 makes the degenerate no-positive-predictions case
        # explicit instead of emitting UndefinedMetricWarning (value is the
        # same 0.0 sklearn would fall back to).
        self.metrics = {
            'accuracy': accuracy_score(y_true, y_pred),
            'precision': precision_score(y_true, y_pred, average='binary',
                                         zero_division=0),
            'recall': recall_score(y_true, y_pred, average='binary',
                                   zero_division=0),
            'f1_score': f1_score(y_true, y_pred, average='binary',
                                 zero_division=0),
        }

        # AUC-ROC is undefined (sklearn raises ValueError) when y_true holds
        # a single class — skip the metric rather than crash the evaluation.
        if y_proba is not None and len(np.unique(y_true)) > 1:
            scores = self._positive_scores(y_proba)
            self.metrics['auc_roc'] = roc_auc_score(y_true, scores)

        return self.metrics

    def get_confusion_matrix(self, y_true: np.ndarray, y_pred: np.ndarray) -> np.ndarray:
        """Generate confusion matrix (shape follows the classes present)."""
        return confusion_matrix(y_true, y_pred)

    def get_classification_report(self, y_true: np.ndarray, y_pred: np.ndarray) -> str:
        """Generate detailed classification report with readable class names."""
        target_names = self._class_labels(y_true, y_pred)
        return classification_report(y_true, y_pred, target_names=target_names)

    def plot_confusion_matrix(self, y_true: np.ndarray, y_pred: np.ndarray, 
                             save_path: str = None) -> None:
        """Plot confusion matrix.

        Shown interactively unless save_path is given, in which case the
        figure is written to that path instead.
        """
        cm = self.get_confusion_matrix(y_true, y_pred)
        # Tick labels must match the matrix dimensions: with a single class
        # present the matrix is 1x1 and hard-coded two-class labels would
        # mislabel the plot.
        labels = self._class_labels(y_true, y_pred)

        plt.figure(figsize=(8, 6))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                   xticklabels=labels,
                   yticklabels=labels)
        plt.title('Confusion Matrix')
        plt.ylabel('True Label')
        plt.xlabel('Predicted Label')

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
        else:
            plt.show()

        plt.close()

    def plot_roc_curve(self, y_true: np.ndarray, y_proba: np.ndarray, 
                      save_path: str = None) -> Tuple[float, np.ndarray, np.ndarray]:
        """Plot ROC curve.

        Returns:
            (auc_score, fpr, tpr) from sklearn's roc_curve / roc_auc_score.
        """
        scores = self._positive_scores(y_proba)
        fpr, tpr, thresholds = roc_curve(y_true, scores)
        auc_score = roc_auc_score(y_true, scores)

        plt.figure(figsize=(8, 6))
        plt.plot(fpr, tpr, color='darkorange', lw=2,
                label=f'ROC curve (AUC = {auc_score:.2f})')
        # Diagonal reference line for a random (no-skill) classifier.
        plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('False Positive Rate')
        plt.ylabel('True Positive Rate')
        plt.title('Receiver Operating Characteristic (ROC) Curve')
        plt.legend(loc="lower right")

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
        else:
            plt.show()

        plt.close()
        return auc_score, fpr, tpr

    def plot_precision_recall_curve(self, y_true: np.ndarray, y_proba: np.ndarray,
                                   save_path: str = None) -> Tuple[float, np.ndarray, np.ndarray]:
        """Plot Precision-Recall curve.

        Returns:
            (pr_auc, precision, recall); pr_auc is the positive area under
            the PR curve.
        """
        scores = self._positive_scores(y_proba)
        precision, recall, thresholds = precision_recall_curve(y_true, scores)
        # precision_recall_curve returns recall in DECREASING order, so a
        # plain np.trapz(precision, recall) integrates right-to-left and
        # comes out negative; negate to get the positive area.
        pr_auc = -np.trapz(precision, recall)

        plt.figure(figsize=(8, 6))
        plt.plot(recall, precision, color='blue', lw=2,
                label=f'PR curve (AUC = {pr_auc:.2f})')
        plt.xlim([0.0, 1.0])
        plt.ylim([0.0, 1.05])
        plt.xlabel('Recall')
        plt.ylabel('Precision')
        plt.title('Precision-Recall Curve')
        plt.legend(loc="lower left")

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
        else:
            plt.show()

        plt.close()
        return pr_auc, precision, recall

    def print_evaluation_summary(self, y_true: np.ndarray, y_pred: np.ndarray,
                                y_proba: np.ndarray = None) -> None:
        """Print comprehensive evaluation summary to stdout.

        Covers scalar metrics, the per-class classification report, and a
        labelled confusion-matrix breakdown (handles the single-class case).
        """
        print("=" * 60)
        print("TLS TRAFFIC CLASSIFICATION MODEL EVALUATION")
        print("=" * 60)

        # Print metrics
        metrics = self.evaluate_model(y_true, y_pred, y_proba)
        print(f"Accuracy:  {metrics['accuracy']:.4f}")
        print(f"Precision: {metrics['precision']:.4f}")
        print(f"Recall:    {metrics['recall']:.4f}")
        print(f"F1-Score:  {metrics['f1_score']:.4f}")

        if 'auc_roc' in metrics:
            print(f"AUC-ROC:   {metrics['auc_roc']:.4f}")

        print("\n" + "-" * 60)
        print("DETAILED CLASSIFICATION REPORT")
        print("-" * 60)
        print(self.get_classification_report(y_true, y_pred))

        print("\n" + "-" * 60)
        print("CONFUSION MATRIX")
        print("-" * 60)
        cm = self.get_confusion_matrix(y_true, y_pred)

        if cm.shape == (1, 1):
            # Only one class present: the lone cell is either all-TN or
            # all-TP depending on which class that is.
            if np.unique(y_true)[0] == 0:
                print(f"True Negative:  {cm[0, 0]}")
                print("False Positive: 0")
                print("False Negative: 0")
                print("True Positive:  0")
            else:
                print("True Negative:  0")
                print("False Positive: 0")
                print("False Negative: 0")
                print(f"True Positive:  {cm[0, 0]}")
        else:
            print(f"True Negative:  {cm[0, 0]}")
            print(f"False Positive: {cm[0, 1]}")
            print(f"False Negative: {cm[1, 0]}")
            print(f"True Positive:  {cm[1, 1]}")
        print("=" * 60)