#!/usr/bin/env python3
"""
Main CLI interface for TLS Traffic Classification Pipeline

Usage:
    python main.py run_pipeline    # Run complete ZenML pipeline
    python main.py convert         # Convert PCAPs to Zeek logs
    python main.py evaluate        # Evaluate trained model
    python main.py clear           # Remove all intermediate and final artifacts
    python main.py status          # Show current status
    python main.py --help          # Show help message
"""

import argparse
import shutil
import sys
from pathlib import Path
import subprocess
import os
import time
from tqdm import tqdm
import joblib
import pandas as pd


def run_pipeline():
    """Run the complete ZenML pipeline via ``uv``.

    Verifies that the ``data/`` directory exists and that Zeek logs are
    present (converting PCAPs on demand), then launches
    ``zenml_pipeline.py`` as a subprocess with output streamed to the
    console.

    Returns:
        bool: True if the pipeline finished successfully, False otherwise.
    """
    print("🚀 Starting TLS Traffic Classification Pipeline...")
    print("=" * 60)

    try:
        # Check if data directory exists
        data_dir = Path("data")
        if not data_dir.exists():
            print("❌ Data directory not found. Please create 'data/' directory and add PCAP files.")
            print("💡 Tip: Run 'python main.py convert' after adding PCAP files.")
            return False

        # Convert PCAPs on the fly if no Zeek logs are present yet.
        if not list(data_dir.glob("*_zeek_logs")):
            print("⚠️  No Zeek logs found. Attempting to convert PCAP files first...")
            if not convert_pcaps():
                print("❌ Failed to convert PCAP files. Please check your data.")
                return False

        # Output is deliberately not captured so the pipeline's own progress
        # is streamed straight to the console.
        subprocess.run(["uv", "run", "zenml_pipeline.py"], check=True)

        print("\n✅ Pipeline completed successfully!")
        print("📁 Check 'zenml_artifacts/' directory for results.")
        return True

    except subprocess.CalledProcessError as e:
        # NOTE: output was not captured, so e.stdout/e.stderr are None here;
        # only the exit code is available to report.
        print(f"\n❌ Pipeline failed with exit code {e.returncode}")
        print("\n💡 Troubleshooting tips:")
        print("   • Check if Zeek logs are properly formatted")
        print("   • Ensure sufficient disk space and memory")
        print("   • Try running 'python main.py clear' first")
        return False
    except FileNotFoundError:
        print("❌ 'uv' command not found. Please install uv first:")
        print("   curl -LsSf https://astral.sh/uv/install.sh | sh")
        return False
    except Exception as e:
        print(f"❌ Unexpected error: {e}")
        print("💡 Please report this issue with the error message above.")
        return False


def clear_artifacts():
    """Remove all intermediate and final artifacts from the working tree.

    Deletes known build/cache directories, glob-matched byproducts
    (``*.pyc``, ``*.egg-info``, ...), and any ``*_zeek_logs`` directories
    under ``data/``, then prints a summary with the number of entries
    removed and the disk space freed.
    """
    import glob  # local import: only needed for pattern expansion here

    print("🧹 Cleaning up artifacts...")

    # Literal names and glob patterns of artifacts to delete.
    artifacts_to_remove = [
        "zenml_artifacts",
        "artifacts",
        "__pycache__",
        ".pytest_cache",
        ".mypy_cache",
        "*.pyc",
        "*.pyo",
        ".coverage",
        "htmlcov",
        ".tox",
        "build",
        "dist",
        "*.egg-info"
    ]

    # Also remove any Zeek log directories produced by `convert`.
    data_dir = Path("data")
    if data_dir.exists():
        artifacts_to_remove.extend(str(d) for d in data_dir.glob("*_zeek_logs"))

    removed_count = 0
    total_size_freed = 0

    for artifact in artifacts_to_remove:
        # Expand glob patterns to concrete paths; literal names pass through
        # unchanged, so both cases share one removal path below.
        candidates = glob.glob(artifact) if "*" in artifact else [artifact]
        for candidate in candidates:
            path = Path(candidate)
            if not path.exists():
                continue
            # Measure before deleting so the freed-space report is accurate.
            size = get_directory_size(path) if path.is_dir() else path.stat().st_size
            try:
                if path.is_dir():
                    shutil.rmtree(path)
                else:
                    os.remove(path)
                print(f"  ✅ Removed: {candidate}")
                removed_count += 1
                total_size_freed += size
            except Exception as e:
                print(f"  ❌ Failed to remove {candidate}: {e}")

    # Human-readable size of everything that was deleted.
    if total_size_freed > 0:
        size_mb = total_size_freed / (1024 * 1024)
        if size_mb > 1024:
            size_display = f"{size_mb/1024:.2f} GB"
        else:
            size_display = f"{size_mb:.2f} MB"
    else:
        size_display = "0 MB"

    print(f"\n🎯 Cleanup completed!")
    print(f"   Files/directories removed: {removed_count}")
    print(f"   Space freed: {size_display}")

def get_directory_size(path):
    """Return the total size in bytes of all regular files under *path*.

    Entries that vanish mid-walk or cannot be accessed are skipped; an
    unreadable tree simply contributes whatever was summed before the error.
    """
    total = 0
    try:
        for root, _dirs, names in os.walk(path):
            for name in names:
                full_path = os.path.join(root, name)
                # Guard against files removed between listing and stat.
                if os.path.exists(full_path):
                    total += os.path.getsize(full_path)
    except (OSError, PermissionError):
        # Best effort: ignore anything we cannot stat.
        pass
    return total


def _find_container_runtime():
    """Return the first available container runtime ("podman" or "docker"), or None."""
    for runtime in ["podman", "docker"]:
        try:
            subprocess.run([runtime, "--version"], capture_output=True, check=True)
            return runtime
        except (subprocess.CalledProcessError, FileNotFoundError):
            continue
    return None


def convert_pcaps(data_dir="data"):
    """Convert every ``*.pcap`` file in *data_dir* to Zeek logs.

    Runs the ``zeek/zeek`` container image (via Podman or Docker) against
    each PCAP, writing logs to a sibling ``<name>_zeek_logs`` directory.
    PCAPs that already have log files are counted as done and skipped.

    Args:
        data_dir: Directory containing the PCAP files (default: ``"data"``).

    Returns:
        bool: True if at least one PCAP was converted (or already done),
        False if nothing could be processed.
    """
    print("🔄 Converting PCAP files to Zeek logs...")

    data_path = Path(data_dir)
    if not data_path.exists():
        print(f"❌ Data directory {data_dir} not found")
        print("💡 Please create the directory and add PCAP files:")
        print(f"   mkdir -p {data_dir}")
        print(f"   cp *.pcap {data_dir}/")
        return False

    pcap_files = list(data_path.glob("*.pcap"))
    if not pcap_files:
        print(f"❌ No PCAP files found in {data_dir}")
        print("💡 Please add PCAP files to the data directory.")
        return False

    print(f"📁 Found {len(pcap_files)} PCAP files")

    container_runtime = _find_container_runtime()
    if not container_runtime:
        print("❌ No container runtime found (Podman or Docker)")
        print("💡 Please install one of the following:")
        print("   Ubuntu/Debian: sudo apt-get install podman")
        print("   Ubuntu/Debian: sudo apt-get install docker.io")
        return False

    print(f"📦 Using {container_runtime} for Zeek processing")

    success_count = 0
    for pcap_file in tqdm(pcap_files, desc="Converting PCAPs"):
        print(f"\n🔄 Processing {pcap_file.name}...")

        # Create output directory
        output_dir = data_path / f"{pcap_file.stem}_zeek_logs"
        output_dir.mkdir(exist_ok=True)

        # Skip PCAPs that already produced logs in an earlier run.
        existing_logs = list(output_dir.glob("*.log"))
        if existing_logs:
            print(f"  ⚠️  Found {len(existing_logs)} existing log files, skipping...")
            success_count += 1
            continue

        # Sanity-check the PCAP before spending container time on it.
        try:
            file_size = pcap_file.stat().st_size
            if file_size == 0:
                print("  ❌ PCAP file is empty, skipping...")
                continue
            print(f"  📄 PCAP file size: {file_size / (1024*1024):.1f} MB")
        except OSError:
            print("  ❌ Cannot read PCAP file, skipping...")
            continue

        # Run Zeek in a disposable container; the PCAP is mounted read-only.
        cmd = [
            container_runtime, "run", "--rm",
            "-v", f"{pcap_file.absolute()}:/input.pcap:ro",
            "-v", f"{output_dir.absolute()}:/output",
            "zeek/zeek",
            "zeek", "-r", "/input.pcap",
            "Log::default_logdir=/output",
            "local"
        ]

        try:
            subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                check=True,
                timeout=600  # 10 minute timeout
            )

            # Success is judged by actual log output, not just exit code.
            log_files = list(output_dir.glob("*.log"))
            if log_files:
                print(f"  ✅ Generated {len(log_files)} log files")
                success_count += 1
            else:
                print("  ⚠️  No log files generated (PCAP might not contain TLS traffic)")

        except subprocess.TimeoutExpired:
            print(f"  ❌ Timeout processing {pcap_file.name} (file too large)")
            continue
        except subprocess.CalledProcessError as e:
            print(f"  ❌ Error processing {pcap_file.name}")
            if e.stderr:
                print(f"     Error: {e.stderr.strip()}")
            else:
                print("     No error details available")
            continue
        except Exception as e:
            print(f"  ❌ Unexpected error processing {pcap_file.name}: {e}")
            continue

    if success_count == len(pcap_files):
        print(f"\n✅ All {success_count} PCAP files converted successfully!")
        return True
    elif success_count > 0:
        print(f"\n⚠️  {success_count}/{len(pcap_files)} PCAP files converted successfully")
        return True
    else:
        print("\n❌ No PCAP files could be converted")
        return False


def evaluate_model(data_dir="data"):
    """Evaluate trained model on new data.

    Loads the joblib model bundle saved by the pipeline (a dict with keys
    ``'model'`` and ``'feature_names'``), parses all SSL records found under
    ``<data_dir>/*_zeek_logs``, rebuilds the training feature matrix, prints
    prediction statistics, and writes the results to
    ``evaluation_results.csv``.

    Args:
        data_dir: Directory containing the ``*_zeek_logs`` directories.

    Returns:
        bool: True on success, False when prerequisites are missing or an
        error occurs during evaluation.
    """
    print("🔍 Evaluating trained model...")

    # Check if model exists
    model_path = Path("zenml_artifacts/tls_classifier_model.joblib")
    if not model_path.exists():
        print("❌ No trained model found. Run 'python main.py run_pipeline' first.")
        return False

    # Check if Zeek logs exist
    data_path = Path(data_dir)
    zeek_dirs = list(data_path.glob("*_zeek_logs"))
    if not zeek_dirs:
        print("❌ No Zeek logs found. Run 'python main.py convert' first.")
        return False

    try:
        # Load model
        print("📦 Loading trained model...")
        model_dict = joblib.load(model_path)
        model = model_dict['model']  # Extract the actual sklearn model

        # The feature extractor lives in the local src/ package.
        print("📊 Loading Zeek logs...")
        sys.path.insert(0, str(Path(__file__).parent / "src"))
        from aiops_eta.feature_extractor import TLSFeatureExtractor

        extractor = TLSFeatureExtractor()
        all_data = []

        # Collect SSL records from every Zeek log directory.
        for zeek_dir in zeek_dirs:
            ssl_log = zeek_dir / "ssl.log"
            if ssl_log.exists():
                print(f"   Processing {ssl_log}")
                df = extractor.parse_ssl_log(str(ssl_log))
                if not df.empty:
                    all_data.append(df)

        if not all_data:
            print("❌ No SSL data found in Zeek logs")
            return False

        # Combine all data
        combined_df = pd.concat(all_data, ignore_index=True)
        print(f"📈 Loaded {len(combined_df)} SSL records")

        # Extract features
        print("🔧 Extracting features...")
        features = extractor.extract_features(combined_df)

        # Get feature columns that match the model
        feature_cols = extractor.get_feature_columns()
        X = features[feature_cols].copy()

        # Ensure X is a DataFrame
        if not isinstance(X, pd.DataFrame):
            X = pd.DataFrame(X, columns=feature_cols)

        # One-hot encode src_port_category to match training format.
        if 'src_port_category' in X.columns:
            X = pd.get_dummies(X, columns=['src_port_category'], prefix='src_port_category')

        # Ensure all expected columns exist (fill missing ones with 0)
        expected_features = model_dict['feature_names']
        for feature in expected_features:
            if feature not in X.columns:
                X[feature] = 0

        # Reorder columns to match training order
        X = X[expected_features]

        # Fill any remaining NaN values with 0
        X = X.fillna(0)

        print(f"   Final feature matrix shape: {X.shape}")

        # Make predictions
        print("🎯 Making predictions...")
        predictions = model.predict(X)
        probabilities = model.predict_proba(X)

        # Show results
        malicious_count = sum(predictions)
        benign_count = len(predictions) - malicious_count

        print(f"\n📊 Evaluation Results:")
        print(f"   Total records: {len(predictions)}")
        print(f"   Predicted malicious: {malicious_count} ({malicious_count/len(predictions)*100:.1f}%)")
        print(f"   Predicted benign: {benign_count} ({benign_count/len(predictions)*100:.1f}%)")

        # Average of each row's winning-class probability.
        if len(probabilities) > 0:
            avg_confidence = probabilities.max(axis=1).mean()
            print(f"   Average confidence: {avg_confidence:.3f}")

        # Show feature importance if available
        if hasattr(model, 'feature_importances_'):
            try:
                feature_names = model_dict.get('feature_names', [f'feature_{i}' for i in range(len(model.feature_importances_))])
                importance_df = pd.DataFrame({
                    'feature': feature_names[:len(model.feature_importances_)],
                    'importance': model.feature_importances_
                }).sort_values('importance', ascending=False)

                print(f"\n🔍 Top 5 Important Features:")
                for i, row in importance_df.head(5).iterrows():
                    print(f"   {row['feature']}: {row['importance']:.4f}")
            except Exception as e:
                print(f"   Could not show feature importance: {e}")

        # Save predictions alongside the feature matrix.
        output_file = Path("evaluation_results.csv")
        results_df = X.copy()
        results_df['prediction'] = predictions

        # Handle probability output (might be 1D if only one class)
        if probabilities.shape[1] > 1:
            results_df['malicious_probability'] = probabilities[:, 1]
        else:
            # If only one class, use the single probability as malicious probability
            results_df['malicious_probability'] = probabilities[:, 0]

        results_df.to_csv(output_file, index=False)
        print(f"   💾 Results saved to {output_file}")

        return True

    except Exception as e:
        print(f"❌ Error during evaluation: {e}")
        import traceback
        traceback.print_exc()
        return False


def show_status(data_dir="data"):
    """Print an overview of artifacts, data files, and tooling availability."""
    print("📊 TLS Traffic Classification Pipeline Status")
    print("=" * 50)

    # Artifact inventory with per-file sizes.
    artifacts_dir = Path("zenml_artifacts")
    if not artifacts_dir.exists():
        print("📁 Artifacts directory: Not found")
    else:
        artifact_entries = list(artifacts_dir.glob("*"))
        print(f"📁 Artifacts directory: {len(artifact_entries)} files")
        for entry in artifact_entries:
            entry_mb = entry.stat().st_size / (1024 * 1024)
            print(f"   - {entry.name} ({entry_mb:.2f} MB)")

    # Data directory: PCAPs and their converted Zeek log directories.
    data_path = Path(data_dir)
    if not data_path.exists():
        print("📂 Data directory: Not found")
    else:
        pcaps = list(data_path.glob("*.pcap"))
        log_dirs = list(data_path.glob("*_zeek_logs"))
        print(f"📂 Data directory: {len(pcaps)} PCAP files, {len(log_dirs)} Zeek log directories")
        if pcaps:
            print("   PCAP files:")
            for pcap in pcaps:
                pcap_mb = pcap.stat().st_size / (1024 * 1024)
                print(f"     - {pcap.name} ({pcap_mb:.2f} MB)")

    # Presence of the pipeline entry script.
    if Path("zenml_pipeline.py").exists():
        print("🐍 Pipeline script: Found")
    else:
        print("🐍 Pipeline script: Not found")

    # Availability of the uv package manager.
    try:
        uv_check = subprocess.run(["uv", "--version"], capture_output=True, text=True)
        print(f"📦 UV package manager: {uv_check.stdout.strip()}")
    except (FileNotFoundError, subprocess.CalledProcessError):
        print("📦 UV package manager: Not found")

def main():
    """Parse CLI arguments and dispatch to the matching command handler."""
    parser = argparse.ArgumentParser(
        description="TLS Traffic Classification Pipeline CLI",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python main.py run_pipeline           # Run complete pipeline
  python main.py convert                 # Convert PCAPs to Zeek logs
  python main.py evaluate                # Evaluate model on data
  python main.py status                  # Show current status
  python main.py clear                   # Clean up all artifacts
  python main.py --data-dir custom_data  # Use custom data directory
  python main.py --help                  # Show this help message
        """
    )
    parser.add_argument(
        "command",
        choices=["run_pipeline", "clear", "status", "convert", "evaluate"],
        help="Command to execute"
    )
    parser.add_argument(
        "--data-dir", "-d",
        default="data",
        help="Data directory path (default: data)"
    )
    parser.add_argument(
        "--verbose", "-v",
        action="store_true",
        help="Enable verbose output"
    )
    args = parser.parse_args()

    # Dispatch table: command name -> zero-argument callable. argparse's
    # `choices` guarantees the key is present.
    handlers = {
        "run_pipeline": run_pipeline,
        "clear": clear_artifacts,
        "status": lambda: show_status(args.data_dir),
        "convert": lambda: convert_pcaps(args.data_dir),
        "evaluate": lambda: evaluate_model(args.data_dir),
    }
    handlers[args.command]()


# Allow the module to be executed directly as a script.
if __name__ == "__main__":
    main()
