import os
from collections import deque

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.spatial.distance import pdist, squareform, mahalanobis
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.cluster import OPTICS, AffinityPropagation, KMeans, DBSCAN
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE, Isomap, LocallyLinearEmbedding, MDS, SpectralEmbedding
from sklearn.metrics import silhouette_score, davies_bouldin_score, calinski_harabasz_score
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler

# Clustering experiments. This file collects many approaches (custom and
# library-based); each needs repeated parameter tuning — getting these to work
# is the key to reproducing the paper's results.
class CFSFDP(BaseEstimator, ClusterMixin):
    """Clustering by Fast Search and Find of Density Peaks (Rodriguez & Laio, 2014).

    Points combining high local density with a large distance to any denser
    point are taken as cluster centers; every other point inherits the label
    of its nearest denser neighbor.
    """

    def __init__(self, dc=None, n_clusters=3):
        # dc: density cutoff distance; estimated from the data when None.
        # n_clusters: how many centers to pick from the decision graph
        # (generalizes the previously hard-coded value of 3).
        self.dc = dc
        self.n_clusters = n_clusters
        self.labels_ = None

    def fit(self, X):
        """Cluster X (n_samples, n_features); sets self.labels_ and returns self."""
        # `is None` (not truthiness) so an explicitly passed dc=0 is respected.
        if self.dc is None:
            self.dc = self._compute_dc(X)

        distances = squareform(pdist(X, metric='euclidean'))
        self.labels_ = self._cfsfdp_cluster(distances)
        return self

    def _compute_dc(self, X):
        """Estimate a cutoff distance as the 20th percentile of all pairwise distances."""
        distances = pdist(X, metric='euclidean')
        # The percentile is tunable to target different neighborhood densities.
        return np.percentile(distances, 20)

    def _cfsfdp_cluster(self, distances):
        """Run CFSFDP on a precomputed square distance matrix; return integer labels."""
        n_samples = distances.shape[0]

        # Local density: number of points closer than dc to each point.
        density = np.sum(distances < self.dc, axis=0)

        # delta[i]: distance to the nearest point of strictly higher density.
        # nneigh[i]: index of that neighbor (used for label propagation below).
        delta = np.zeros(n_samples)
        nneigh = np.zeros(n_samples, dtype=int)

        for i in range(n_samples):
            higher = np.where(density > density[i])[0]
            if higher.size:
                # BUGFIX: np.argmin over the masked slice yields a position
                # *within the slice*; it must be mapped back to an index into
                # the full array before being stored in nneigh.
                j = np.argmin(distances[i][higher])
                delta[i] = distances[i][higher][j]
                nneigh[i] = higher[j]
            else:
                # Densest point overall: give it the maximal delta so it ranks
                # highest on the decision graph; it is its own neighbor.
                delta[i] = np.max(distances[i])
                nneigh[i] = i

        # Decision graph: choose the n_clusters points maximizing density * delta.
        gamma = density * delta
        centers = np.argsort(gamma)[-self.n_clusters:]

        labels = np.full(n_samples, -1)
        for idx, center in enumerate(centers):
            labels[center] = idx

        # BUGFIX: propagate labels in order of DECREASING density so each
        # point's denser neighbor is labeled before the point itself. The
        # original index-order pass could read a still-unlabeled neighbor
        # and permanently copy -1.
        for i in np.argsort(-density):
            if labels[i] == -1:
                labels[i] = labels[nneigh[i]]

        return labels


# Distance from every point to its k-th nearest neighbor.
def compute_k_distances(X, k=5):
    """Return, for each sample in X, the distance to its k-th nearest neighbor.

    Note: the query set equals the fit set, so each sample counts itself as
    its own first neighbor.
    """
    knn_model = NearestNeighbors(n_neighbors=k).fit(X)
    neighbor_dists, _ = knn_model.kneighbors(X)
    # Last of the k returned columns is the k-th neighbor distance.
    return neighbor_dists[:, -1]


# Derive per-point eps and min_samples from the k-distance profile.
def set_adaptive_params(k_distances):
    """Map each point's k-distance to an (eps, min_samples) pair.

    eps is taken as 1.5x the k-distance; min_samples scales a base of 5 by
    the point's density (1 / k-distance) relative to the mean density.
    Returns (eps_values, min_samples) as numpy arrays.
    """
    eps_values = 1.5 * k_distances  # heuristic: eps = 1.5 * k-distance
    densities = 1.0 / k_distances
    mean_density = np.mean(densities)
    # Denser points get a proportionally larger core-point threshold.
    min_samples = np.ceil(5 * densities / mean_density).astype(int)
    return eps_values, min_samples


# Density-based clustering where eps and min_samples vary per point.
def adaptive_dbscan(X, eps_values, min_samples_values):
    """DBSCAN-style clustering with a per-point eps and min_samples.

    Parameters
    ----------
    X : (n, d) array of points.
    eps_values : (n,) per-point neighborhood radius.
    min_samples_values : (n,) per-point core-point threshold.

    Returns
    -------
    (n,) int array of labels; -1 marks noise.
    """
    n = X.shape[0]
    labels = np.full(n, -1, dtype=int)
    visited = np.zeros(n, dtype=bool)
    cluster_id = 0

    for i in range(n):
        if visited[i]:
            continue
        visited[i] = True
        # Neighborhood of i under its own radius (includes i itself).
        neighbors = np.where(np.linalg.norm(X - X[i], axis=1) <= eps_values[i])[0]
        if len(neighbors) < min_samples_values[i]:
            continue  # not a core point; stays noise unless claimed by a later cluster
        labels[i] = cluster_id
        # BFS expansion of the cluster. BUGFIX(perf): the original grew the
        # frontier with np.append, copying the whole queue on every extension
        # (accidental O(n^2)); a deque gives O(1) pops/extends while keeping
        # the exact FIFO visit order, so the resulting labels are unchanged.
        queue = deque(neighbors)
        while queue:
            point = queue.popleft()
            if not visited[point]:
                visited[point] = True
                new_neighbors = np.where(
                    np.linalg.norm(X - X[point], axis=1) <= eps_values[point]
                )[0]
                if len(new_neighbors) >= min_samples_values[point]:
                    queue.extend(new_neighbors)
            # Border points keep the first cluster that reaches them.
            if labels[point] == -1:
                labels[point] = cluster_id
        cluster_id += 1

    return labels

# Read pairwise DTW distances from a CSV file and assemble a symmetric
# similarity (distance) matrix indexed by vessel MMSI.
df = pd.read_csv("Data/DTW/CSJ/dtw_distances2.csv")
# All distinct MMSIs appearing in either column; np.unique also sorts them.
all_mmsis = np.unique(df[['MMSI1', 'MMSI2']].values)
similarity_matrix = np.zeros((len(all_mmsis), len(all_mmsis)))
mmsi_to_index = {mmsi: index for index, mmsi in enumerate(all_mmsis)}
for _, row in df.iterrows():
    i, j = mmsi_to_index[row['MMSI1']], mmsi_to_index[row['MMSI2']]
    # Fill both triangles so the matrix is symmetric; the diagonal stays 0.
    similarity_matrix[i, j] = similarity_matrix[j, i] = row['DTW_Distance']
# NOTE(review): StandardScaler standardizes each COLUMN of the distance matrix
# independently (treating rows as feature vectors); presumably intended as
# preprocessing before embedding — confirm this is the desired normalization.
similarity_matrix_normalized = StandardScaler().fit_transform(similarity_matrix)


def perform_analysis(name, reducer):
    """Embed the normalized similarity matrix with `reducer`, cluster the
    embedding with OPTICS, print internal validity indices, and save a plot.

    Parameters
    ----------
    name : str
        Label used in printed output and in the saved plot's file name.
    reducer : object
        Dimensionality reducer exposing fit_transform (PCA, MDS, Isomap, ...).

    Reads the module-level `similarity_matrix_normalized`.
    """
    X_reduced = reducer.fit_transform(similarity_matrix_normalized)

    # Earlier tuning attempts, kept for reference:
    #   OPTICS(min_samples=30, xi=0.01, min_cluster_size=0.03)
    #   OPTICS(min_samples=150, metric=<mahalanobis via inverse covariance>,
    #          xi=0.0005, min_cluster_size=0.005, cluster_method='xi')
    #   OPTICS(min_samples=60, max_eps=0.001, metric='euclidean', xi=0.0001,
    #          min_cluster_size=0.08, cluster_method='xi')
    #   OPTICS(min_samples=150, metric='chebyshev', xi=0.05,
    #          min_cluster_size=0.02, cluster_method='xi')
    #   KMeans(n_clusters=8) / AffinityPropagation() / CFSFDP()
    #   Adaptive DBSCAN:
    #     k_distances = compute_k_distances(X_reduced)
    #     eps_values, min_samples_values = set_adaptive_params(k_distances)
    #     clusters = adaptive_dbscan(X_reduced, eps_values, min_samples_values)
    # BUGFIX: the old version unconditionally computed
    # np.linalg.inv(np.cov(X_reduced, rowvar=False)) for a Mahalanobis helper
    # that the active clusterer never used; besides being dead work it could
    # raise LinAlgError on a singular covariance matrix. Removed.

    # Current choice: OPTICS with the 'canberra' metric.
    clusterer = OPTICS(
        min_samples=100,
        metric='canberra',
        xi=0.05,
        min_cluster_size=0.02,
        cluster_method='xi'
    )
    clusters = clusterer.fit_predict(X_reduced)

    # Drop noise points (label -1) before computing validity indices.
    mask = clusters != -1
    valid_clusters = clusters[mask]
    valid_X = X_reduced[mask]

    # To persist MMSI -> cluster assignments (all_mmsis aligns with `clusters`):
    #   pd.DataFrame(zip(all_mmsis, clusters), columns=['MMSI', 'Cluster']).to_csv(
    #       os.path.join("./Data/DTW/CSJ/LaplacianEigenmaps_Optics",
    #                    "mmsi_cluster_pairs2.csv"), index=False)

    # Internal validity indices:
    #   Silhouette Coefficient: closer to 1 means better-separated clusters.
    #   Davies-Bouldin Index: lower means tighter, better-separated clusters.
    #   Calinski-Harabasz Index: higher between/within dispersion ratio is better.
    # BUGFIX: the old guard `np.any(valid_clusters)` tested whether any label
    # was NON-ZERO — it wrongly skipped scoring for a single cluster labeled 0
    # yet crashed the scorers when exactly one non-zero cluster remained.
    # All three indices require at least two distinct cluster labels.
    scorable = np.unique(valid_clusters).size > 1
    silhouette = silhouette_score(valid_X, valid_clusters) if scorable else -1
    davies_bouldin = davies_bouldin_score(valid_X, valid_clusters) if scorable else float('inf')
    calinski_harabasz = calinski_harabasz_score(valid_X, valid_clusters) if scorable else 0

    print(f"Silhouette Coefficient for OPTICS with {name}: {silhouette}")
    print(f"Davies-Bouldin Index for OPTICS with {name}: {davies_bouldin}")
    print(f"Calinski-Harabasz Index for OPTICS with {name}: {calinski_harabasz}")
    plot_clusters(X_reduced, clusters, f'Clusters found by OPTICS with {name}', f'OPTICS_{name.lower()}')


def plot_clusters(X_reduced, clusters, title, file_name):
    """Scatter-plot the first two embedding components colored by cluster
    label and save the figure under ./Data/DTW/CSJ/Tries_2/.

    Noise points (label -1) are drawn in black.

    Parameters
    ----------
    X_reduced : (n, >=2) embedding; only the first two columns are plotted.
    clusters : (n,) integer labels, -1 for noise.
    title : figure title.
    file_name : base name (no extension) for the saved PNG.
    """
    plt.figure(figsize=(8, 6))
    unique_labels = np.unique(clusters)
    # BUGFIX: the old denominator `max(unique_labels + 1)` is 0 when every
    # point is noise (labels == {-1}), raising ZeroDivisionError; clamp to 1.
    denom = max(unique_labels.max() + 1, 1)
    for label in unique_labels:
        # Colormap position scaled by the largest label so colors spread evenly.
        color = 'k' if label == -1 else plt.cm.jet(float(label) / denom)
        member_mask = clusters == label
        plt.scatter(X_reduced[member_mask, 0], X_reduced[member_mask, 1], color=color,
                    label=f'Cluster {label}')
    plt.title(title)
    plt.xlabel('Component 1')
    plt.ylabel('Component 2')
    plt.legend()
    plt.savefig(f"./Data/DTW/CSJ/Tries_2/{file_name}.png")
    plt.close()

# # Execute dimensionality reduction and clustering
# perform_analysis("PCA", PCA(n_components=2))
# perform_analysis("MDS", MDS(n_components=2))
# perform_analysis("Isomap", Isomap(n_components=2, n_neighbors=30))
# perform_analysis("LLE", LocallyLinearEmbedding(n_components=2, n_neighbors=30, method='standard'))
# perform_analysis("MLLE", LocallyLinearEmbedding(n_components=2, n_neighbors=30, method='modified'))
# perform_analysis("Hessian LLE", LocallyLinearEmbedding(n_components=2, n_neighbors=30, method='hessian', eigen_solver='dense'))
# perform_analysis("Laplacian Eigenmaps", SpectralEmbedding(n_components=2, n_neighbors=30))


