import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt

########################################################################################################
# NOTE: Choosing k via silhouette_score is not necessarily reliable here — at least not while
# the clustering uses only a single feature (the connection count), as this script does.


# Load the log data.  Expected columns: src_ip, dst_ip, dst_port.
log_data = pd.read_csv('data.csv')

# Count connections for each (source IP, destination IP, destination port) triple.
connection_counts = log_data.groupby(['src_ip', 'dst_ip', 'dst_port']).size().reset_index(name='count')

# Standardize the single feature (connection count) so distances are scale-free.
scaler = StandardScaler()
scaled_counts = scaler.fit_transform(connection_counts[['count']])

# Candidate k values and their silhouette scores.
k_values = []
silhouette_scores = []

# silhouette_score requires 2 <= k <= n_samples - 1, so cap the sweep by the sample count
# (the original range(2, 11) raised when the grouped data had fewer than 11 rows).
max_k = min(10, len(scaled_counts) - 1)
for k in range(2, max_k + 1):
    # n_init is pinned explicitly: its default changed from 10 to 'auto' in scikit-learn 1.4,
    # so relying on the default makes results version-dependent.
    kmeans = KMeans(n_clusters=k, n_init=10, random_state=0)
    labels = kmeans.fit_predict(scaled_counts)

    # Evaluate the clustering with the mean silhouette coefficient.
    silhouette_avg = silhouette_score(scaled_counts, labels)

    # Record this k and its score.
    k_values.append(k)
    silhouette_scores.append(silhouette_avg)

# Plot silhouette score as a function of k.
plt.plot(k_values, silhouette_scores, 'bo-')
plt.xlabel('k')
plt.ylabel('Silhouette Score')
plt.title('Silhouette Score for Different k Values')
plt.show()

# Pick the k with the highest silhouette score.
best_k = k_values[silhouette_scores.index(max(silhouette_scores))]
print("Best k value:", best_k)

# Final clustering with the best k (same pinned n_init / random_state for reproducibility).
kmeans_final = KMeans(n_clusters=best_k, n_init=10, random_state=0)
labels_final = kmeans_final.fit_predict(scaled_counts)

# Attach cluster labels and persist the result.  index=False keeps the pandas row index
# out of the file (the original wrote it as an unnamed extra column).
connection_counts['cluster'] = labels_final
print(connection_counts)
connection_counts.to_csv("kmeans1.csv", index=False, encoding='utf-8')