from sklearn.mixture import GaussianMixture
from sklearn.preprocessing import StandardScaler
import numpy as np

def handle_feature_selection(features, high_access_cluster=None):
    """Cluster access-pattern features with a GMM and flag high-access objects.

    Each row of ``features`` is assumed to be one object with at least two
    columns: column 0 = recency, column 1 = frequency (TODO: confirm with
    caller). The number of mixture components K is chosen automatically by
    minimizing BIC over K in [2, 5]. The cluster whose (de-standardized)
    center maximizes ``frequency - recency`` is treated as the high-access
    cluster.

    Parameters
    ----------
    features : array-like of shape (n_samples, n_features)
        Raw feature matrix; standardized internally before fitting.
    high_access_cluster : optional
        Deprecated / unused. Kept for backward compatibility with existing
        callers; the high-access cluster is always auto-detected and this
        argument is ignored.

    Returns
    -------
    dict with keys:
        'best_K'              : int, K selected by BIC
        'high_access_cluster' : int, index of the high-access cluster
        'labels'              : ndarray (n_samples,), hard cluster labels
        'probs'               : ndarray (n_samples, best_K), soft memberships
        'will_visit'          : ndarray (n_samples,), 1 if in high-access cluster
    """
    # Standardize features so all columns contribute comparably to the GMM.
    scaler = StandardScaler()
    X_normalized = scaler.fit_transform(features)

    # Automatically select the best K using the BIC criterion.
    K_range = range(2, 6)
    bic_scores = []
    for k in K_range:
        gmm = GaussianMixture(n_components=k, random_state=0)
        gmm.fit(X_normalized)
        bic_scores.append(gmm.bic(X_normalized))

    # Pick the K with the minimum BIC.
    best_K = K_range[np.argmin(bic_scores)]

    # Fit the final model with the selected K.
    gmm = GaussianMixture(n_components=best_K, random_state=0)
    gmm.fit(X_normalized)

    # Identify the high-access-probability cluster: assumed to have a center
    # with small recency and large frequency. Centers are inverse-transformed
    # back to the original feature scale before comparison.
    cluster_centers = scaler.inverse_transform(gmm.means_)
    # Score = frequency (col 1) - recency (col 0); argmax is the high-access cluster.
    high_access_cluster = int(np.argmax(cluster_centers[:, 1] - cluster_centers[:, 0]))

    # Generate prediction outputs.
    probs = gmm.predict_proba(X_normalized)  # soft membership per cluster
    labels = gmm.predict(X_normalized)       # most-probable cluster label
    will_visit = (labels == high_access_cluster).astype(int)  # 1 = high access probability

    # Bug fix: the original version computed these results but never returned
    # them, so callers received None.
    return {
        'best_K': int(best_K),
        'high_access_cluster': high_access_cluster,
        'labels': labels,
        'probs': probs,
        'will_visit': will_visit,
    }