import pandas as pd
import numpy as np
from sklearn.ensemble import (
    RandomForestClassifier,
    RandomForestRegressor
)
from sklearn.feature_selection import (
    mutual_info_classif,
    mutual_info_regression
)
from sklearn.preprocessing import LabelEncoder


def feature_select(data: pd.DataFrame, label_col: str, k: int) -> list[str]:
    """
    Select the K most important features of a dataset.

    Importance is the average of two min-max-normalized scores:
    random-forest impurity importance and mutual information with the
    label. The task type (classification vs. regression) is inferred
    heuristically from the label column.

    Args:
        data (pd.DataFrame): Dataset containing the features and label.
        label_col (str): Name of the label column.
        k (int): Number of features to select.

    Returns:
        list[str]: Names of the K most important features, ordered by
        decreasing combined importance. Fewer than K names are returned
        when the dataset has fewer than K feature columns.
    """
    X = data.drop(columns=[label_col])
    y = data[label_col]

    # Encode non-numeric features so the estimators can consume them.
    for col in X.select_dtypes(include=['object', 'category']).columns:
        if X[col].nunique() > 10:
            # High cardinality: frequency encoding keeps a single column
            # instead of exploding into many dummy columns.
            freq = X[col].value_counts(normalize=True)
            X[col] = X[col].map(freq)
        else:
            # Low cardinality: simple ordinal codes are sufficient for
            # tree-based models.
            le = LabelEncoder()
            X[col] = le.fit_transform(X[col].astype(str))

    # Fill missing values. numeric_only=True guards against a TypeError
    # when any residual non-numeric column (e.g. datetime) remains.
    X = X.fillna(X.median(numeric_only=True))
    # Columns whose median was NaN (all-NaN or non-numeric columns) may
    # still contain NaN; fall back to the per-column mode for those.
    if X.isna().any().any():
        X = X.fillna(X.mode().iloc[0])

    # Heuristic task detection: non-numeric labels, or few distinct
    # values, are treated as classification.
    is_classification = y.dtype == 'object' or y.nunique() < 10

    # Score 1: random-forest impurity-based importance.
    if is_classification:
        rf = RandomForestClassifier(n_estimators=100, random_state=42)
    else:
        rf = RandomForestRegressor(n_estimators=100, random_state=42)
    rf.fit(X, y)
    rf_importance = pd.Series(rf.feature_importances_, index=X.columns)

    # Score 2: mutual information between each feature and the label.
    if is_classification:
        mi = mutual_info_classif(X, y, random_state=42)
    else:
        mi = mutual_info_regression(X, y, random_state=42)
    mi_importance = pd.Series(mi, index=X.columns)

    # Average the two min-max-normalized scores and rank.
    combined = (_minmax_normalize(rf_importance)
                + _minmax_normalize(mi_importance)) / 2
    return combined.sort_values(ascending=False).head(k).index.tolist()


def _minmax_normalize(scores: pd.Series) -> pd.Series:
    """Scale scores to [0, 1]; a constant series becomes all zeros.

    The explicit zero-span check avoids the 0/0 division of the naive
    formula, which would turn every score into NaN and make the final
    ranking arbitrary.
    """
    span = scores.max() - scores.min()
    if span == 0:
        return pd.Series(0.0, index=scores.index)
    return (scores - scores.min()) / span
