import os
import time

import mlflow
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

# --- Node Handlers ---
# Each handler now receives the node's parameters and a list of parent outputs (DataFrames).
# It returns its own output, which will be passed to its children.

def handle_read_csv(params, parent_outputs):
    """Read a CSV file into a DataFrame. Root node: it has no parents.

    Args:
        params: Node parameters. Expects 'file_path' (path to the CSV,
            relative to the working directory or absolute) and an optional
            'id' used only in log output.
        parent_outputs: Unused; read_csv nodes have no upstream connections.

    Returns:
        pandas.DataFrame with the file's contents.

    Raises:
        ValueError: If 'file_path' is missing.
        Exception: Re-raised from pandas if the file cannot be read/parsed.
    """
    node_id = params.get('id', 'N/A')
    print(f"▶️ START: Executing READ_CSV node {node_id}...")
    file_path = params.get('file_path')
    if not file_path:
        raise ValueError(f"File path is missing for read_csv node {node_id}.")

    # Anchor relative paths at the working directory. os.path.join discards
    # the "." prefix when file_path is absolute — the original string concat
    # ("./" + path) silently turned absolute paths into relative ones.
    full_path = os.path.join(".", file_path)
    try:
        df = pd.read_csv(full_path)
        print(f"✅ SUCCESS: Read {len(df)} rows from {full_path}.")
        return df  # Return the DataFrame directly
    except Exception as e:
        print(f"❌ ERROR: Failed to read CSV for node {node_id}. Error: {e}")
        raise

def handle_select_columns(params, parent_outputs):
    """Project an upstream DataFrame down to a comma-separated list of columns.

    Args:
        params: Node parameters. Expects 'columns' (comma-separated column
            names) and an optional 'id' used only in log output.
        parent_outputs: Parent results; the first element must be the
            DataFrame to select from.

    Returns:
        A new DataFrame containing only the requested columns.

    Raises:
        ValueError: If there is no input DataFrame, 'columns' is empty,
            or any requested column is absent from the input.
    """
    node_id = params.get('id', 'N/A')
    print(f"▶️ START: Executing SELECT_COLUMNS node {node_id}...")

    if not parent_outputs or parent_outputs[0] is None:
        raise ValueError(f"Node {node_id} requires an input connection.")

    # By convention the first parent carries the working DataFrame.
    source_df = parent_outputs[0]

    raw_columns = params.get('columns', '')
    if not raw_columns:
        raise ValueError(f"Columns parameter is missing for node {node_id}.")

    # Trim whitespace and drop empty entries (e.g. trailing commas).
    columns_to_select = [name.strip() for name in raw_columns.split(',') if name.strip()]

    try:
        missing_cols = [name for name in columns_to_select if name not in source_df.columns]
        if missing_cols:
            raise ValueError(f"The following columns were not found: {missing_cols}")

        result = source_df[columns_to_select]
        print(f"✅ SUCCESS: Selected columns: {columns_to_select}.")
        return result
    except Exception as e:
        print(f"❌ ERROR: Failed to select columns for node {node_id}. Error: {e}")
        raise

def handle_default_node(params, parent_outputs):
    """Inspect the upstream DataFrame and pass it through unchanged.

    Args:
        params: Node parameters; only the optional 'id' is used (for logging).
        parent_outputs: Parent results; the first element must be a DataFrame.

    Returns:
        The input DataFrame, unmodified (pure pass-through).

    Raises:
        ValueError: If no input DataFrame is supplied.
    """
    node_id = params.get('id', 'N/A')
    print(f"▶️ START: Executing DEFAULT (Inspect Data) node {node_id}...")

    if not parent_outputs or parent_outputs[0] is None:
        raise ValueError(f"Node {node_id} requires an input connection.")

    df = parent_outputs[0]
    print("ℹ️ Input DataFrame Info:")
    # DataFrame.info() writes its report to stdout and returns None; the
    # original wrapped it in print(), emitting a stray "None" line.
    df.info()

    print(f"✅ SUCCESS: Finished executing DEFAULT node {node_id}.")
    return df  # Pass through the DataFrame without modification

def handle_random_forest_train(params, parent_outputs):
    """Train a RandomForestClassifier on the upstream DataFrame and log to MLflow.

    Args:
        params: Node parameters. Expects:
            - 'target_column' (required): label column in the input frame.
            - 'experiment_name' (default 'Default_Experiment').
            - 'n_estimators' (default 100) and 'max_depth' (default 10);
              both are coerced with int(), so string values are accepted.
            - 'id': node identifier, used only in log output.
        parent_outputs: Parent results; the first element must be the
            training DataFrame.

    Returns:
        A status string naming the completed MLflow run. This is a terminal
        node: it does not forward a DataFrame to children.

    Raises:
        ValueError: If no input DataFrame is supplied, or the target column
            is missing/unspecified.
    """
    node_id = params.get('id', 'N/A')
    print(f"▶️ START: Executing RANDOM_FOREST_TRAIN node {node_id}...")
    
    if not parent_outputs or parent_outputs[0] is None:
        raise ValueError(f"Node {node_id} requires an input connection with data.")
    df = parent_outputs[0]

    exp_name = params.get('experiment_name', 'Default_Experiment')
    target_col = params.get('target_column')
    n_estimators = int(params.get('n_estimators', 100))
    max_depth = int(params.get('max_depth', 10))

    if not target_col or target_col not in df.columns:
        raise ValueError(f"Target column '{target_col}' not found or specified for node {node_id}.")

    mlflow.set_experiment(exp_name)
    with mlflow.start_run() as run:
        run_id = run.info.run_id
        print(f"🚀 MLflow Run Started: {run_id} in experiment '{exp_name}'")
        mlflow.log_params({"node_id": node_id, "n_estimators": n_estimators, "max_depth": max_depth, "target_column": target_col})

        # Split features/label and one-hot encode any categorical features.
        X = df.drop(columns=[target_col])
        y = df[target_col]
        X = pd.get_dummies(X, drop_first=True)
        # String labels get integer-encoded; the index->class mapping is
        # logged as an artifact so predictions can be decoded later.
        if y.dtype == 'object':
            le = LabelEncoder()
            y = le.fit_transform(y)
            mlflow.log_param("label_encoding", "applied")
            mlflow.log_dict({str(k): v for k, v in enumerate(le.classes_)}, "class_labels.json")

        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

        print("⏳ Training RandomForestClassifier...")
        # NOTE(review): autolog is enabled inside the already-active run, so
        # its params/metrics/model land in this run. X_test/y_test are never
        # used below — presumably reserved for a future evaluation step;
        # confirm this is intentional.
        mlflow.sklearn.autolog(log_models=True, log_input_examples=True, silent=True)
        model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, random_state=42)
        model.fit(X_train, y_train)
        
        print("✅ SUCCESS: Model training complete.")
        try:
            # Best-effort convenience link; assumes a local MLflow UI on
            # 127.0.0.1:5000 — falls back to a plain success message.
            exp_id = mlflow.get_experiment_by_name(exp_name).experiment_id
            print(f"📈 View MLflow Run: http://127.0.0.1:5000/#/experiments/{exp_id}/runs/{run_id}")
        except Exception:
            print("📈 Run logged to MLflow successfully.")

    # This node is a final step, so it doesn't need to return a DataFrame
    return f"MLflow run {run_id} completed."

# --- Node Type to Handler Mapping ---
# Dispatch table: node 'type' string -> handler(params, parent_outputs).
# Node types not listed here fall through to the pass-through branch in
# execute_node_task.
NODE_TYPE_HANDLERS = {
    'read_csv': handle_read_csv,
    'select_columns': handle_select_columns,
    'inspect_data': handle_default_node,  # generic inspect/pass-through node
    'random_forest_train': handle_random_forest_train,
}

# --- Main Execution Function ---
def execute_node_task(node, parent_outputs):
    """Dispatch a workflow node to its registered handler and return its output.

    Args:
        node: Node dict with at least 'id'; optional 'type' (defaults to
            'default') and 'data' (the handler's parameter dict).
        parent_outputs: List of outputs produced by the node's parents.

    Returns:
        The handler's return value; for an unrecognized node type, the first
        parent's output (or None when there are no parents) is passed through.
    """
    node_type = node.get('type', 'default')
    handler = NODE_TYPE_HANDLERS.get(node_type)

    if handler is None:
        print(f"⚠️ Warning: No handler found for node type '{node_type}'")
        # If a node has no handler, pass through the data from its first parent
        return parent_outputs[0] if parent_outputs else None

    # Copy the params before injecting the node id: the original assigned
    # into node['data'] directly, mutating the caller's node definition.
    task_params = dict(node.get('data', {}))
    task_params['id'] = node['id']
    return handler(task_params, parent_outputs)