# from sklearn.cluster import KMeans  #导入聚类方法
# from sklearn.datasets import load_iris  #导入鸢尾花数据集
# import matplotlib.pyplot as plt  #导入绘图包
#
# iris_data = load_iris()  #载入鸢尾花数据
# k = 3  #设置簇的个数
# x = iris_data['data'][:,:2]  #使用鸢尾花数据的前两个特征进行聚类
# km = KMeans(n_clusters = k)  #创建聚类模型
# km.fit(x)  #对数据聚类
# label_pred = km.labels_  #获得每个样本的类别编号
# centroids = km.cluster_centers_   #获取簇的质心
#
# ##绘图
# plt.rcParams['font.sans-serif'] = ['SimHei']  #图中可以正常显示中文
# plt.rcParams['axes.unicode_minus'] = False  #图中可以正常显示负号
# plt.subplot(131)  #第一个子图
# plt.scatter(x[:,0],x[:,1],s = 5)  #绘制聚类前的散点图，点的大小设置为50
# plt.title('聚类前')
# plt.subplots_adjust(wspace=0.5) #用于调整边距和子图间距
# plt.subplot(132)  #第二个子图
# plt.scatter(x[:,0], x[:,1], c = label_pred, s = 5) #以聚类标签划分颜色
# plt.scatter(centroids[:,0], centroids[:,1], c = 'red', s = 5) #画出质心点
# plt.title('聚类结果')
# plt.subplot(133)   #第三个子图
# plt.scatter(x[:,0], x[:,1], c = iris_data['target'], s = 5) #以真实标签划分颜色,
# plt.title('真实类别')
# print(iris_data['target'])
# plt.show()
import numpy
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans,MeanShift,DBSCAN,BisectingKMeans

# Load the dataset: whitespace-separated numeric columns; keep only the
# first two columns as (x, y) features. np.loadtxt performs the same
# split-and-float conversion the old manual loop did, but in one call.
data = np.loadtxt('testSet.txt', usecols=(0, 1), ndmin=2)

# 计算不同簇数下的KMeans模型的性能指标（肘部法则）
# inertia = []
# for k in range(1, 11):  # 尝试不同的簇数，这里选择从1到10
#     kmeans = KMeans(n_clusters=k,init='random')
#     kmeans.fit(data)
#     inertia.append(kmeans.inertia_)  # 损失函数值（inertia_）越小越好
#
# # 绘制肘部法则图
# plt.figure(figsize=(8, 6))
# plt.plot(range(1, 11), inertia, marker='o')
# plt.xlabel('Number of Clusters (k)')
# plt.ylabel('Inertia')
# plt.title('Elbow Method to Find Optimal k')
# plt.grid(True)
# plt.show()
# print(numpy.diff(inertia,2))
# # 使用最优的簇数进行聚类
optimal_k = 4  # optimal cluster count suggested by the elbow method above
# n_init is pinned explicitly (the sklearn default changed across versions)
# and random_state makes this demo reproducible run-to-run.
kmeans = KMeans(n_clusters=optimal_k, init='random', n_init=10, random_state=42)
kmeans.fit(data)
labels = kmeans.labels_            # cluster index assigned to each sample
centroids = kmeans.cluster_centers_  # (optimal_k, 2) array of cluster centers
print(centroids)
# Color each point by its assigned cluster so the clustering result is
# actually visible (previously `labels` was computed but never used and
# all points were drawn in a single color).
plt.scatter(data[:, 0], data[:, 1], c=labels)
plt.scatter(centroids[:, 0], centroids[:, 1], c="red", marker="x", s=100)
plt.show()
# 可视化聚类结果
# plt.figure(figsize=(8, 6))
#
# for i in range(optimal_k):
#     cluster_data = data[labels == i]
#     plt.scatter(cluster_data[:, 0], cluster_data[:, 1], label=f'Cluster {i + 1}')
#
# plt.scatter(centroids[:, 0], centroids[:, 1], c='red', marker='x', s=200, label='Centroids')
# plt.title('KMeans Clustering with Optimal k')
# plt.xlabel('Feature 1')
# plt.ylabel('Feature 2')
# plt.legend()
# plt.grid(True)
# plt.show()
