import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
import time
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

# Record wall-clock start time for the total-runtime report printed at the end of the script
start_time = time.time()

def calculate_metrics4_df(pred, y):
    """Compute accuracy, precision, recall and F1 for binary cluster labels.

    NOTE: cluster id 0 is treated as the POSITIVE prediction and cluster id 1
    as the NEGATIVE prediction (the mapping is intentionally flipped relative
    to the usual convention, because KMeans assigns cluster ids arbitrarily
    and in this pipeline cluster 0 happened to align with the positive class
    — confirm against the data if the input clustering changes).

    Args:
        pred: array-like of predicted cluster ids (0 or 1).
        y: array-like of ground-truth binary labels (0 or 1).

    Returns:
        Tuple (accuracy, precision, recall, f1). Each ratio falls back to 0
        when its denominator is 0 (e.g. empty input), instead of raising
        ZeroDivisionError / producing NaN.
    """
    pred = np.asarray(pred)
    y = np.asarray(y)

    # Confusion-matrix counts with cluster 0 as the positive prediction.
    TP = np.sum((pred == 0) & (y == 1))
    TN = np.sum((pred == 1) & (y == 0))
    FP = np.sum((pred == 0) & (y == 0))
    FN = np.sum((pred == 1) & (y == 1))

    # Guard every denominator so empty input yields zeros rather than an error.
    total = TP + TN + FP + FN
    acc1 = (TP + TN) / total if total > 0 else 0
    precision1 = TP / (TP + FP) if (TP + FP) > 0 else 0
    recall1 = TP / (TP + FN) if (TP + FN) > 0 else 0
    f11 = 2 * precision1 * recall1 / (precision1 + recall1) if (precision1 + recall1) > 0 else 0

    return acc1, precision1, recall1, f11


""" 加载特征数据 """

#####################################################################

# node_features_path = 'data/ClfSim.csv'
node_features_path = 'data/HGSim.csv'

node_df = pd.read_csv(node_features_path)

# 数据标准化
scaler = StandardScaler()
X_scaled = scaler.fit_transform(node_df[['sim']])  # shape=(n,1)

# 直接聚类（需显式reshape为二维数组）
kmeans = KMeans(n_clusters=2, random_state=0)
predicted_labels = kmeans.fit_predict(X_scaled)  # 输入shape=(n_samples,1)
node_df['pre'] = predicted_labels

# 获取预测值和真实值
pred = node_df['pre'].values
y = node_df['Ture'].values
acc1,precision1, recall1, f11= calculate_metrics4_df(pred, y)
print(acc1)
print('parameter1: {:.4f} {:.4f} {:.4f}'.format(precision1, recall1, f11))

node_df.to_csv('dataCPR/HGKmeans.csv', index=False)

print(f"总执行时间: {time.time()-start_time:.2f}秒")






