#!/usr/bin/env python3
"""ZenML-compatible TLS Classification Pipeline."""

from zenml import pipeline, step
import sys
from pathlib import Path
from typing import Dict, Any, Tuple, List
import pandas as pd
import numpy as np
import json
import joblib

# Add src to path
sys.path.insert(0, str(Path(__file__).parent / "src"))

from aiops_eta.feature_extractor import TLSFeatureExtractor
from aiops_eta.model import TLSRandomForest
from aiops_eta.evaluate import ModelEvaluator

@step
def validate_and_load_data_step(data_dir: str) -> Tuple[Dict[str, Dict[str, pd.DataFrame]], Dict[str, Any]]:
    """Load Zeek log directories under *data_dir* and build a validation report.

    Scans for ``*_zeek_logs`` directories, parses each ``*.log`` file as a
    tab-separated table (ssl.log gets an explicit column list since its header
    lines are skipped), and summarizes SSL record counts per class.

    Returns:
        A ``(log_dataframes, validation_report)`` pair where ``log_dataframes``
        maps ``"<name>.pcap"`` to ``{log stem -> DataFrame}`` and the report is
        also written to ``zenml_artifacts/validation_report.json``.
    """
    print("📁 Step 1: Loading and validating data...")

    # ssl.log header rows are skipped (skiprows=7), so supply the schema here.
    ssl_columns = [
        'ts', 'uid', 'id.orig_h', 'id.orig_p', 'id.resp_h', 'id.resp_p',
        'version', 'cipher', 'curve', 'server_name', 'resumed', 'last_alert',
        'next_protocol', 'established', 'ssl_history', 'cert_chain_fps',
        'client_cert_chain_fps', 'sni_matches_cert', 'validation_status'
    ]

    def read_log(path: Path) -> pd.DataFrame:
        # All Zeek logs are tab-separated with '#'-prefixed metadata lines.
        if path.stem == 'ssl':
            return pd.read_csv(
                path, sep='\t', comment='#', skip_blank_lines=True,
                low_memory=False, skiprows=7, header=None, names=ssl_columns,
            )
        return pd.read_csv(path, sep='\t', comment='#', skip_blank_lines=True, low_memory=False)

    log_dataframes: Dict[str, Dict[str, pd.DataFrame]] = {}
    for zeek_dir in Path(data_dir).iterdir():
        if not (zeek_dir.is_dir() and zeek_dir.name.endswith("_zeek_logs")):
            continue
        print(f"Loading logs from {zeek_dir.name}...")

        loaded: Dict[str, pd.DataFrame] = {}
        for log_file in zeek_dir.glob("*.log"):
            if log_file.name.startswith("#"):
                continue
            try:
                df = read_log(log_file)
            except Exception as e:
                print(f"  Warning: Could not load {log_file.name}: {e}")
                continue
            if not df.empty:
                loaded[log_file.stem] = df
                print(f"  Loaded {log_file.name}: {len(df)} records")

        if loaded:
            pcap_name = zeek_dir.name.replace("_zeek_logs", "") + ".pcap"
            log_dataframes[pcap_name] = loaded

    validation_report: Dict[str, Any] = {
        "total_pcaps": len(log_dataframes),
        "ssl_logs_available": 0,
        "total_ssl_records": 0,
        "data_quality_issues": [],
        "class_distribution": {"benign": 0, "malicious": 0}
    }

    for pcap_name, dataframes in log_dataframes.items():
        ssl_df = dataframes.get('ssl')
        if ssl_df is None:
            validation_report["data_quality_issues"].append(f"{pcap_name}: No SSL logs")
            continue
        validation_report["ssl_logs_available"] += 1
        validation_report["total_ssl_records"] += len(ssl_df)
        # Class is inferred from the pcap file name.
        lowered = pcap_name.lower()
        bucket = "benign" if ("normal" in lowered or "benign" in lowered) else "malicious"
        validation_report["class_distribution"][bucket] += len(ssl_df)

    print(f"✅ Data validation completed:")
    print(f"  PCAPs processed: {validation_report['total_pcaps']}")
    print(f"  SSL records: {validation_report['total_ssl_records']}")
    print(f"  Class distribution: {validation_report['class_distribution']}")

    # Persist the report alongside the other pipeline artifacts.
    artifacts_dir = Path("zenml_artifacts")
    artifacts_dir.mkdir(exist_ok=True)

    validation_report_path = artifacts_dir / "validation_report.json"
    with open(validation_report_path, 'w') as f:
        json.dump(validation_report, f, indent=2, default=str)
    print(f"  💾 Validation report saved to {validation_report_path}")

    return log_dataframes, validation_report

@step
def extract_features_step(log_dataframes: Dict[str, Dict[str, pd.DataFrame]]) -> Tuple[pd.DataFrame, np.ndarray, Dict[str, Any]]:
    """Extract and engineer features from Zeek logs.

    Labels each pcap benign (0) when its name contains "normal"/"benign" and
    malicious (1) otherwise, runs TLSFeatureExtractor over every ssl log,
    one-hot encodes categoricals, drops NaN rows and constant columns, and —
    when both classes are present — keeps the top-k ANOVA-F features.

    Args:
        log_dataframes: Mapping of pcap name to ``{log stem -> DataFrame}``
            as produced by ``validate_and_load_data_step``.

    Returns:
        ``(features, labels, metadata)``: the engineered feature matrix, the
        aligned 0/1 label array, and a JSON-serializable summary (also written
        to ``zenml_artifacts/feature_metadata.json``).

    Raises:
        ValueError: If no ssl log yields any features.
    """
    from sklearn.feature_selection import SelectKBest, VarianceThreshold, f_classif

    print("🔍 Step 2: Extracting and engineering features...")

    extractor = TLSFeatureExtractor()
    all_features: List[pd.DataFrame] = []
    all_labels: List[int] = []

    for pcap_name, dataframes in log_dataframes.items():
        # Same naming convention as the class distribution in step 1.
        name = pcap_name.lower()
        label = 0 if ("normal" in name or "benign" in name) else 1

        if 'ssl' not in dataframes:
            continue
        # Work on a copy: the input artifact must not be mutated in place.
        ssl_df = dataframes['ssl'].copy()

        try:
            # Zeek encodes booleans as 'T'/'F' strings.
            ssl_df['resumed'] = ssl_df['resumed'].map(lambda x: x == 'T')
            ssl_df['established'] = ssl_df['established'].map(lambda x: x == 'T')

            features_df = extractor.extract_features(ssl_df)
            feature_columns = extractor.get_feature_columns()

            # Guarantee a stable column set even when a feature is absent.
            for col in feature_columns:
                if col not in features_df.columns:
                    features_df[col] = 0

            features_subset = features_df[feature_columns].copy()
            all_features.append(features_subset)
            all_labels.extend([label] * len(features_subset))

            print(f"  ✅ {pcap_name}: {len(features_subset)} features")

        except Exception as e:
            print(f"  ❌ Error processing {pcap_name}: {e}")
            continue

    if not all_features:
        raise ValueError("No features could be extracted")

    combined_features = pd.concat(all_features, ignore_index=True)
    labels_array = np.array(all_labels)

    # One-hot encode categorical features.
    for col in ['src_port_category']:
        if col in combined_features.columns:
            dummies = pd.get_dummies(combined_features[col], prefix=col)
            combined_features = pd.concat([combined_features.drop(col, axis=1), dummies], axis=1)

    # Drop rows containing NaN, keeping the label array aligned.
    valid_mask = ~combined_features.isnull().any(axis=1)
    combined_features = combined_features[valid_mask]
    labels_array = labels_array[valid_mask.to_numpy()]

    # Remove constant (zero-variance) features.
    original_feature_names = combined_features.columns.tolist()
    variance_selector = VarianceThreshold(threshold=0)
    features_no_constant = variance_selector.fit_transform(combined_features)
    non_constant_mask = variance_selector.get_support()

    constant_features = [n for n, keep in zip(original_feature_names, non_constant_mask) if not keep]
    non_constant_feature_names = [n for n, keep in zip(original_feature_names, non_constant_mask) if keep]

    if constant_features:
        print(f"  Removing {len(constant_features)} constant features: {constant_features}")

    combined_features = pd.DataFrame(
        features_no_constant,
        columns=non_constant_feature_names,
        index=combined_features.index
    )

    # Coerce leftover object columns (e.g. booleans) to int for the model.
    for col in combined_features.columns:
        if combined_features[col].dtype == 'object':
            try:
                combined_features[col] = combined_features[col].astype(int)
            except (ValueError, TypeError):
                # Conversion may fail on NaN; fill with 0 and retry.
                combined_features[col] = combined_features[col].fillna(0).astype(int)

    feature_metadata: Dict[str, Any] = {
        "total_features": len(original_feature_names),
        "non_constant_features": len(non_constant_feature_names),
        "feature_names": non_constant_feature_names,
        "samples": len(combined_features),
        "class_distribution": {
            "benign": int(np.sum(labels_array == 0)),
            "malicious": int(np.sum(labels_array == 1))
        },
        "removed_constant_features": constant_features
    }

    # Univariate (ANOVA F) selection requires at least two classes.
    if len(np.unique(labels_array)) > 1 and len(combined_features.columns) > 0:
        selector = SelectKBest(score_func=f_classif, k=min(20, len(combined_features.columns)))
        selected_features = selector.fit_transform(combined_features, labels_array)

        selected_mask = selector.get_support()
        selected_feature_names = combined_features.columns[selected_mask].tolist()

        combined_features = pd.DataFrame(
            selected_features,
            columns=selected_feature_names,
            index=combined_features.index
        )

        feature_metadata["selected_features"] = len(selected_feature_names)
        # Plain floats keep the JSON report numeric instead of stringified.
        feature_metadata["feature_scores"] = {
            name: float(score)
            for name, score in zip(selected_feature_names, selector.scores_[selected_mask])
        }

    print(f"✅ Feature engineering completed:")
    print(f"  Final features: {len(combined_features.columns)}")
    print(f"  Samples: {len(combined_features)}")

    # Save feature metadata
    artifacts_dir = Path("zenml_artifacts")
    artifacts_dir.mkdir(exist_ok=True)

    feature_metadata_path = artifacts_dir / "feature_metadata.json"
    with open(feature_metadata_path, 'w') as f:
        json.dump(feature_metadata, f, indent=2, default=str)
    print(f"  💾 Feature metadata saved to {feature_metadata_path}")

    return combined_features, labels_array, feature_metadata

@step
def train_model_step(features_df: pd.DataFrame, labels_array: np.ndarray) -> Tuple[Dict[str, Any], Dict[str, Any]]:
    """Train the Random Forest model.

    Splits the data 80/20, fits a TLSRandomForest, evaluates it on the held-out
    split, and persists model, metrics, and feature importances under
    ``zenml_artifacts/``.

    Returns:
        ``(training_results, feature_importance_dict)`` — the results dict
        also carries the fitted model under the "model" key.
    """
    print("🤖 Step 3: Training model...")

    from sklearn.model_selection import train_test_split

    feature_names = features_df.columns.tolist()
    X = features_df.values.astype(float)
    y = labels_array

    # Stratify only when both classes are present in the labels.
    strat = y if len(np.unique(y)) > 1 else None
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42, stratify=strat
    )

    model = TLSRandomForest(n_estimators=100, random_state=42)
    model.train(X_train, y_train, feature_names)  # type: ignore

    feature_importance = model.get_feature_importance()
    print(f"\n🏆 Top 10 Most Important Features:")
    print(feature_importance.head(10).to_string(index=False))

    # Held-out evaluation.
    y_pred = model.predict(X_test)  # type: ignore
    y_proba = model.predict_proba(X_test)  # type: ignore

    evaluator = ModelEvaluator()

    if len(np.unique(y_test)) == 1:
        # Single-class test split: precision/recall/F1 are undefined.
        metrics = {
            'accuracy': float(np.mean(y_pred == y_test)),
            'precision': 0.0,
            'recall': 0.0,
            'f1_score': 0.0,
        }
    else:
        metrics = evaluator.evaluate_model(y_test, y_pred, y_proba)  # type: ignore

    training_results = {
        "metrics": metrics,
        "feature_importance": feature_importance.to_dict(),
        "training_samples": len(X_train),
        "test_samples": len(X_test),
        "feature_count": len(feature_names),
        "model": model  # Include the trained model
    }

    print(f"✅ Model training completed:")
    print(f"  Accuracy: {metrics['accuracy']:.4f}")
    print(f"  F1-Score: {metrics['f1_score']:.4f}")

    artifacts_dir = Path("zenml_artifacts")
    artifacts_dir.mkdir(exist_ok=True)

    feature_importance_path = artifacts_dir / "feature_importance.csv"
    feature_importance.to_csv(feature_importance_path, index=False)
    print(f"  💾 Feature importance saved to {feature_importance_path}")

    # JSON report gets everything except the (non-serializable) model object.
    training_results_path = artifacts_dir / "training_results.json"
    json_results = {k: v for k, v in training_results.items() if k != 'model'}
    with open(training_results_path, 'w') as f:
        json.dump(json_results, f, indent=2, default=str)
    print(f"  💾 Training results saved to {training_results_path}")

    model_path = artifacts_dir / "tls_classifier_model.joblib"
    joblib.dump(model, model_path)
    print(f"  💾 Model saved to {model_path}")

    return training_results, feature_importance.to_dict()

@pipeline
def tls_classification_zenml_pipeline(data_dir: str = "data"):
    """Complete TLS classification pipeline for ZenML.

    Wires the three steps together: load/validate -> feature engineering ->
    model training. Returns the per-step artifacts as a single dict.
    """
    log_dataframes, validation_report = validate_and_load_data_step(data_dir=data_dir)
    features_df, labels_array, feature_metadata = extract_features_step(log_dataframes=log_dataframes)
    training_results, feature_importance = train_model_step(
        features_df=features_df,
        labels_array=labels_array,
    )

    outputs = {
        "validation_report": validation_report,
        "feature_metadata": feature_metadata,
        "training_results": training_results,
        "feature_importance": feature_importance,
    }
    return outputs

if __name__ == "__main__":
    # Script entry point: run the full pipeline against the local data dir.
    banner = "=" * 60
    print("🚀 Running TLS Classification Pipeline with ZenML")
    print(banner)

    tls_classification_zenml_pipeline(data_dir="data")

    print(f"\n✅ Pipeline completed successfully!")
    print(f"📁 Artifacts saved to 'zenml_artifacts/' directory during training.")
