import numpy as np
import pandas as pd
from sklearn.cluster import KMeans, DBSCAN
from sklearn.mixture import GaussianMixture
from sklearn.metrics import silhouette_score, calinski_harabasz_score, davies_bouldin_score
import matplotlib.pyplot as plt
import seaborn as sns

# Load the dataset from a local Excel file.
df = pd.read_excel(r'C:\pythondata\redwind.xlsx')

# Data cleaning: report any missing values, then impute them with the
# per-column mean so downstream clustering never sees NaNs.
# BUG FIX: the original computed df.fillna(...) into an unused `df_filled`
# variable, so NaNs leaked into the feature matrix (silhouette_score would
# fail on them); the imputed frame now replaces `df`.
print('数据清洗：')
missing_locations = df.isnull()
if missing_locations.values.any():
    missing_rows = missing_locations.any(axis=1)
    missing_cols = missing_locations.any(axis=0)
    print("Missing values found/找到缺失的值:")
    if missing_rows.any():
        print("Rows with missing values/缺少值的行:")
        print(df[missing_rows])
    if missing_cols.any():
        print("Columns with missing values/缺少值的列:")
        print(df.loc[:, missing_cols])
    # numeric_only=True avoids a TypeError on non-numeric columns (pandas >= 2.0)
    # and only imputes columns where a mean is meaningful.
    df = df.fillna(df.mean(numeric_only=True))
else:
    print("None\n")

# Preprocessing: every column except the last one is treated as a feature.
X = df.iloc[:, :-1].to_numpy()

# K-Means clustering: sweep k = 2..10 and score each clustering with three
# internal validity indices (higher silhouette/CH is better, lower DB is better).
print('K-Means Clustering:')
silhouette_scores = []
ch_scores = []
db_scores = []
for n_clusters in range(2, 11):
    # n_init is pinned explicitly: scikit-learn >= 1.2 warns about, and >= 1.4
    # changed, the default ('auto'); pinning 10 keeps the historical behavior
    # and reproducible results across versions.
    kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=42)
    labels = kmeans.fit_predict(X)
    silhouette_scores.append(silhouette_score(X, labels))
    ch_scores.append(calinski_harabasz_score(X, labels))
    db_scores.append(davies_bouldin_score(X, labels))
    print(f'Number of clusters: {n_clusters}, Silhouette Score: {silhouette_scores[-1]:.3f}, Calinski-Harabasz Score: {ch_scores[-1]:.3f}, Davies-Bouldin Score: {db_scores[-1]:.3f}')

# Select the k with the highest silhouette; +2 because the sweep starts at k=2.
best_k = np.argmax(silhouette_scores) + 2
print(f'\nBest K-Means model: {best_k} clusters')

# DBSCAN clustering: grid-search over (eps, min_samples), keeping the
# combination with the best silhouette score.
print('\nDBSCAN Clustering:')
eps_range = np.linspace(0.1, 1.0, 10)
min_samples_range = [5, 10, 15, 20]
best_eps, best_min_samples, best_silhouette = None, None, -1
for eps in eps_range:
    for min_samples in min_samples_range:
        labels = DBSCAN(eps=eps, min_samples=min_samples).fit_predict(X)
        # Silhouette is undefined when everything lands in a single label.
        if len(np.unique(labels)) <= 1:
            print(f'eps: {eps:.2f}, min_samples: {min_samples}, Silhouette Score: N/A (only 1 cluster)')
            continue
        score = silhouette_score(X, labels)
        if score > best_silhouette:
            best_eps, best_min_samples, best_silhouette = eps, min_samples, score
        print(f'eps: {eps:.2f}, min_samples: {min_samples}, Silhouette Score: {score:.3f}')

if best_eps is not None and best_min_samples is not None:
    print(f'\nBest DBSCAN model: eps={best_eps:.2f}, min_samples={best_min_samples}')
else:
    print('\nDBSCAN could not find a valid clustering model.')

# Gaussian Mixture Model clustering: fit 2..10 components and use BIC
# (lower is better) for model selection.
print('\nGaussian Mixture Model Clustering:')
n_components_range = range(2, 11)
bic_scores = []
for n_components in n_components_range:
    model = GaussianMixture(n_components=n_components, random_state=42)
    model.fit(X)
    bic = model.bic(X)
    bic_scores.append(bic)
    print(f'Number of components: {n_components}, BIC Score: {bic:.3f}')

# Lowest BIC wins; +2 because the sweep starts at 2 components.
best_n_components = np.argmin(bic_scores) + 2
print(f'\nBest GMM model: {best_n_components} components')

# Visualize each model's clustering as a scatter plot over the first two
# feature dimensions. NOTE: only features 0 and 1 are plotted, so cluster
# structure in higher dimensions is not visible here.
fig, axes = plt.subplots(1, 3, figsize=(15, 5))

# K-Means refit with the best k. n_init=10 is pinned for the same reason as
# in the sweep: scikit-learn's default changed in newer versions.
axes[0].set_title('K-Means Clustering')
kmeans = KMeans(n_clusters=best_k, n_init=10, random_state=42)
labels = kmeans.fit_predict(X)
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=labels, ax=axes[0])

# DBSCAN with the best (eps, min_samples), if the grid search found any.
axes[1].set_title('DBSCAN Clustering')
if best_eps is not None and best_min_samples is not None:
    dbscan = DBSCAN(eps=best_eps, min_samples=best_min_samples)
    labels = dbscan.fit_predict(X)
    sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=labels, ax=axes[1])
else:
    # No valid DBSCAN model: annotate the empty panel instead of plotting.
    axes[1].text(0.5, 0.5, 'DBSCAN could not find a valid clustering model.', ha='center', va='center', transform=axes[1].transAxes)

# GMM with the BIC-selected number of components.
axes[2].set_title('GMM Clustering')
gmm = GaussianMixture(n_components=best_n_components, random_state=42)
labels = gmm.fit_predict(X)
sns.scatterplot(x=X[:, 0], y=X[:, 1], hue=labels, ax=axes[2])

plt.show()