import pandas as pd
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt

# Load the connection log. Expected columns include at least:
# src_ip, dst_ip, dst_port, in_packet_bytes, out_packet_bytes,
# total_packet_bytes, domain  (schema inferred from usage below — verify).
log_data = pd.read_csv('data.csv')

# Count how many times each (src_ip, dst_ip, dst_port) triple appears.
connection_counts = log_data.groupby(['src_ip', 'dst_ip', 'dst_port']).size().reset_index(name='count')

# Attach the per-triple connection count back onto every original row.
log_data = log_data.merge(connection_counts, on=['src_ip', 'dst_ip', 'dst_port'])

# Binary feature: destination port is a well-known/common port.
log_data['common_port'] = log_data['dst_port'].isin([443, 80, 8080, 465]).astype(int)
# Binary feature: connection count is high (> 10).
# BUG FIX: the original tested dst_port > 10, which is true for essentially
# every port; the column name and the count <= 10 scoring rule later in the
# script show the connection count was intended.
log_data['common_count'] = (log_data['count'] > 10).astype(int)


# Standardize the clustering features so that the connection count, the two
# binary flags, and the byte totals all contribute on comparable scales.
feature_columns = [
    'count',
    'common_count',
    'common_port',
    'in_packet_bytes',
    'out_packet_bytes',
]
scaler = StandardScaler()
scaled_counts = scaler.fit_transform(log_data[feature_columns])


# Sweep candidate cluster counts and record the silhouette score for each.
k_values = []
silhouette_scores = []

for k in range(2, 11):
    # CONSISTENCY FIX: pass n_init='auto' like the final fit further below;
    # omitting it also triggers sklearn's FutureWarning about the changing
    # default (sklearn >= 1.2, which this file already requires).
    kmeans = KMeans(n_clusters=k, random_state=0, n_init='auto')
    kmeans.fit(scaled_counts)
    labels = kmeans.labels_

    # Silhouette score in [-1, 1]; higher means better-separated clusters.
    silhouette_avg = silhouette_score(scaled_counts, labels)

    k_values.append(k)
    silhouette_scores.append(silhouette_avg)

# Visualize how the silhouette score varies with k.
plt.plot(k_values, silhouette_scores, 'bo-')
plt.xlabel('k')
plt.ylabel('Silhouette Score')
plt.title('Silhouette Score for Different k Values')
plt.show()

# Select the k with the largest silhouette score.
best_k = k_values[silhouette_scores.index(max(silhouette_scores))]
print("Best k value:", best_k)
k = best_k

# Final clustering run with the selected k (K-means; swap in another
# algorithm here if needed).
kmeans = KMeans(n_clusters=k, random_state=0, n_init='auto')
kmeans.fit(scaled_counts)
labels = kmeans.labels_
cluster_centers = kmeans.cluster_centers_

# BUG FIX: take an explicit copy — the original kept a view/slice of
# log_data, so the 'label' (and later 'score') assignments raised
# SettingWithCopyWarning and could be silently lost on a copy.
result = log_data[['src_ip', 'dst_ip', 'dst_port', 'count', 'in_packet_bytes',
                   'out_packet_bytes', 'total_packet_bytes', 'domain']].copy()
# Assign the label ndarray directly; wrapping it in a DataFrame (as the
# original did) only re-aligned it on the identical RangeIndex.
result['label'] = labels
print(result)
# NOTE(review): the row index is written as an extra CSV column; pass
# index=False if that is unintended.
result.to_csv("kmeans2.csv", encoding='utf-8')


# Heuristic suspicion score per row, in [0, 2]:
#   +1 if the destination IP is never contacted on a common port
#   +1 if the connection count is low (<= 10)
# Vectorized replacement for the original O(n^2) iterrows loop, which
# re-filtered the whole DataFrame for every row (and wrote no-op "+= 0"s);
# the resulting scores are identical.
common_ports = [80, 443, 8080, 465]
ips_on_common_ports = set(log_data.loc[log_data['dst_port'].isin(common_ports), 'dst_ip'])
result['score'] = ((~result['dst_ip'].isin(ips_on_common_ports)).astype(int)
                   + (result['count'] <= 10).astype(int))


# Rank clusters by their mean suspicion score, highest first, and list the
# destination IPs belonging to each cluster.
avg_scores = result.groupby('label')['score'].mean().sort_values(ascending=False)

for label in avg_scores.index:
    member_ips = result.loc[result['label'] == label, 'dst_ip'].unique()
    print("Cluster {}: {}".format(label, ', '.join(member_ips)))