import os
import time
import pandas as pd
import numpy as np
import joblib
from typing import Dict, List, Any, Tuple
import argparse
import glob
import random

from data_loader import segment_time_series
from feature_extraction import extract_statistical_features


def load_model_and_scaler(model_path: str, scaler_path: str) -> tuple:
    """
    Deserialize the persisted model and feature scaler from disk.

    Args:
        model_path: Filesystem path to the serialized model artifact.
        scaler_path: Filesystem path to the serialized feature scaler.

    Returns:
        Tuple of (model, scaler) as loaded by joblib.
    """
    return joblib.load(model_path), joblib.load(scaler_path)


def simulate_sensor_data(sample_dir: str, window_size: int = 100) -> Tuple[pd.DataFrame, str]:
    """
    Emulate a live sensor stream by drawing a window from a random sample file.

    Args:
        sample_dir: Directory containing sample data files
        window_size: Number of samples to return

    Returns:
        DataFrame containing simulated sensor data and the actual class
    """
    candidates = glob.glob(os.path.join(sample_dir, "*.csv"))
    if not candidates:
        raise FileNotFoundError(f"No CSV files found in {sample_dir}")

    # Pick one recording at random to stand in for a live device.
    chosen = random.choice(candidates)
    frame = pd.read_csv(chosen)

    # The class label is encoded as the third dash-separated token of
    # the filename.
    label = os.path.basename(chosen).split('-')[2]

    # Slice out a random contiguous window; fall back to the whole file
    # when it is shorter than (or equal to) the requested window.
    if len(frame) > window_size:
        offset = random.randint(0, len(frame) - window_size)
        window = frame.iloc[offset:offset + window_size].copy()
    else:
        window = frame.copy()

    # Keep only the six inertial sensor channels.
    window = window[['acc_x', 'acc_y', 'acc_z', 'angle_x', 'angle_y', 'angle_z']]

    return window, label


def predict_real_time(model_path: str, scaler_path: str, data_dir: str, window_size: int = 100, 
                     interval: float = 1.0, num_predictions: int = 10) -> None:
    """
    Simulate real-time prediction by processing sensor data at regular intervals.

    Each iteration samples a window of stored sensor data, extracts statistical
    features, scales them, classifies the window, and prints the prediction,
    the top-3 class probabilities, and a running accuracy. A final accuracy
    summary is printed on exit (including when interrupted with Ctrl+C).

    Args:
        model_path: Path to the trained model (joblib artifact; either a bare
            estimator or a dict with a 'model' key and optional 'classes')
        scaler_path: Path to the feature scaler (joblib artifact)
        data_dir: Directory containing sample data files
        window_size: Size of the window in number of samples
        interval: Time interval between predictions in seconds
        num_predictions: Number of predictions to make (None for infinite)
    """
    # Load model and scaler
    model_data, scaler = load_model_and_scaler(model_path, scaler_path)
    
    # The persisted artifact may be either a plain estimator or a dict that
    # bundles the estimator with its class labels under 'model'/'classes'.
    if isinstance(model_data, dict) and 'model' in model_data:
        model = model_data['model']
        class_names = model_data.get('classes', model.classes_ if hasattr(model, 'classes_') else None)
    else:
        model = model_data
        class_names = model.classes_ if hasattr(model, 'classes_') else None
    
    if class_names is None:
        # Fall back to deriving labels from the data filenames (third
        # dash-separated token). NOTE(review): this list follows os.listdir
        # order, which is assumed to match predict_proba's column order —
        # TODO confirm; a mismatch would mislabel the printed probabilities.
        class_names = []
        for filename in os.listdir(data_dir):
            if filename.endswith('.csv'):
                class_name = os.path.basename(filename).split('-')[2]
                if class_name not in class_names:
                    class_names.append(class_name)
    
    # Running counters for the accuracy summary.
    count = 0
    correct_predictions = 0
    
    print(f"Starting real-time prediction simulation (interval: {interval}s)")
    print("-" * 60)
    
    try:
        while num_predictions is None or count < num_predictions:
            # Simulate getting sensor data
            segment, actual_class = simulate_sensor_data(data_dir, window_size)
            
            # Extract features directly using extract_statistical_features
            features_dict = extract_statistical_features(segment)
            features_df = pd.DataFrame([features_dict])
            
            # Normalize features using the loaded scaler
            normalized_features = pd.DataFrame(
                scaler.transform(features_df),
                columns=features_df.columns
            )
            
            # Make prediction
            prediction = model.predict(normalized_features)[0]
            probabilities = model.predict_proba(normalized_features)[0]
            
            # Map each probability to its label by position in class_names.
            class_probs = {class_names[i]: prob for i, prob in enumerate(probabilities)}
            
            # Print the prediction
            print(f"Prediction {count+1}:")
            print(f"Actual class: {actual_class}")
            print(f"Predicted class: {prediction}")
            print("Class probabilities:")
            
            # Sort probabilities in descending order
            sorted_probs = sorted(class_probs.items(), key=lambda x: x[1], reverse=True)
            for cls, prob in sorted_probs[:3]:  # Show top 3 probabilities
                print(f"  {cls}: {prob:.4f}")
            
            # Update statistics
            if prediction == actual_class:
                correct_predictions += 1
            
            accuracy = correct_predictions / (count + 1)
            print(f"Current accuracy: {accuracy:.4f} ({correct_predictions}/{count+1})")
            print("-" * 60)
            
            count += 1
            
            # Sleep only when another iteration will follow, so the last
            # prediction does not add a trailing delay.
            if num_predictions is None or count < num_predictions:
                time.sleep(interval)
    
    except KeyboardInterrupt:
        print("\nReal-time prediction simulation stopped by user")
    
    finally:
        # Always report the final accuracy, even on interrupt.
        if count > 0:
            final_accuracy = correct_predictions / count
            print(f"\nFinal accuracy: {final_accuracy:.4f} ({correct_predictions}/{count})")


if __name__ == "__main__":
    # Anchor default artifact locations at the project root, i.e. the
    # parent of the directory containing this script.
    here = os.path.dirname(os.path.abspath(__file__))
    root = os.path.dirname(here)

    parser = argparse.ArgumentParser(description='Real-time IoT Sensor Classification')
    parser.add_argument('--model', type=str,
                        default=os.path.join(root, "output", "model.joblib"),
                        help='Path to the trained model')
    parser.add_argument('--scaler', type=str,
                        default=os.path.join(root, "output", "scaler.joblib"),
                        help='Path to the feature scaler')
    parser.add_argument('--data_dir', type=str,
                        default=os.path.join(root, "data"),
                        help='Directory containing sample data files')
    parser.add_argument('--window_size', type=int, default=100,
                        help='Size of the window in number of samples')
    parser.add_argument('--interval', type=float, default=1.0,
                        help='Time interval between predictions in seconds')
    parser.add_argument('--num_predictions', type=int, default=10,
                        help='Number of predictions to make (default: 10, 0 for infinite)')

    args = parser.parse_args()

    # A command-line value of 0 requests an unbounded run (None internally).
    predict_real_time(
        model_path=args.model,
        scaler_path=args.scaler,
        data_dir=args.data_dir,
        window_size=args.window_size,
        interval=args.interval,
        num_predictions=args.num_predictions or None,
    )
