import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_score, calinski_harabasz_score, davies_bouldin_score

# Load the sales dataset from disk.
data = pd.read_csv("sales_data.csv")

# Select the two features used for clustering.
feature_columns = ["sales", "price"]
X = data[feature_columns]

# Standardize the features so both contribute equally to the
# Euclidean distances that DBSCAN relies on.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Hyper-parameter experiment: try every (eps, min_samples) combination and
# report internal cluster-validity metrics for each fit.
eps_list = [0.1, 0.5, 1, 2, 5]
min_samples_list = [2, 5, 10, 20, 50]
for eps in eps_list:
    for min_samples in min_samples_list:
        dbscan = DBSCAN(eps=eps, min_samples=min_samples)
        dbscan.fit(X_scaled)
        labels = dbscan.labels_
        # Number of real clusters; DBSCAN marks noise points with label -1.
        n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
        # The validity metrics require at least 2 distinct labels and fewer
        # labels than samples, otherwise they raise ValueError (e.g. a tiny
        # eps makes everything noise, a huge eps makes one single cluster).
        n_labels = len(set(labels))
        if n_labels < 2 or n_labels >= len(X_scaled):
            print("eps={}, min_samples={}, n_clusters={}, metrics skipped (degenerate labeling)".format(
                eps, min_samples, n_clusters))
            continue
        silhouette = silhouette_score(X_scaled, labels)
        calinski_harabasz = calinski_harabasz_score(X_scaled, labels)
        davies_bouldin = davies_bouldin_score(X_scaled, labels)
        print("eps={}, min_samples={}, n_clusters={}, silhouette={}, calinski_harabasz={}, davies_bouldin={}".format(
            eps, min_samples, n_clusters, silhouette, calinski_harabasz, davies_bouldin))

# Final model: fit DBSCAN with the chosen parameters and report its metrics.
dbscan = DBSCAN(eps=1, min_samples=5)
dbscan.fit(X_scaled)
labels = dbscan.labels_
# Number of real clusters; DBSCAN marks noise points with label -1.
n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
# Guard the metric calls: silhouette/CH/DB scores raise ValueError unless
# there are at least 2 distinct labels and fewer labels than samples.
n_labels = len(set(labels))
if 2 <= n_labels < len(X_scaled):
    silhouette = silhouette_score(X_scaled, labels)
    calinski_harabasz = calinski_harabasz_score(X_scaled, labels)
    davies_bouldin = davies_bouldin_score(X_scaled, labels)
    print("n_clusters={}, silhouette={}, calinski_harabasz={}, davies_bouldin={}".format(
        n_clusters, silhouette, calinski_harabasz, davies_bouldin))
else:
    print("n_clusters={}, metrics skipped (degenerate labeling)".format(n_clusters))

# Visualize the clustering in the *standardized* feature space.
# Noise points carry label -1 and are colored like any other label here.
plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=labels)
# The plotted values are z-scores from StandardScaler, not raw units,
# so label the axes accordingly.
plt.xlabel("sales (standardized)")
plt.ylabel("price (standardized)")
plt.title("DBSCAN clustering result")
plt.show()