import pandas as pd
import numpy as np
from sklearn.manifold import TSNE, Isomap, LocallyLinearEmbedding
from sklearn.decomposition import PCA
from sklearn.cluster import DBSCAN
from sklearn.cluster import OPTICS
import matplotlib.pyplot as plt
import os
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import silhouette_score, davies_bouldin_score, calinski_harabasz_score
from sklearn.manifold import MDS

# After constructing the DTW matrix, a few clustering approaches are tried
# below; this is an exploratory/debugging script ("Tries").

# Load pairwise DTW distances (expected columns: MMSI1, MMSI2, DTW_Distance).
df = pd.read_csv("Data/DTW/CSJ/dtw_distances2.csv")

# Every unique vessel identifier (MMSI) appearing in either column.
all_mmsis = np.unique(df[['MMSI1', 'MMSI2']].values)

# n x n symmetric matrix, n = number of unique MMSIs.
# NOTE: despite the name, this holds raw DTW *distances* (smaller = more
# similar); no distance->similarity conversion is applied below.
similarity_matrix = np.zeros((len(all_mmsis), len(all_mmsis)))

# Map each MMSI to its row/column index in the matrix.
mmsi_to_index = {mmsi: index for index, mmsi in enumerate(all_mmsis)}

# Fill the matrix symmetrically. Vectorized fancy-indexed assignment replaces
# the original per-row iterrows() loop: same result (with duplicate pairs,
# the last occurrence wins in both versions), far fewer Python-level steps.
row_idx = df['MMSI1'].map(mmsi_to_index).to_numpy()
col_idx = df['MMSI2'].map(mmsi_to_index).to_numpy()
distances = df['DTW_Distance'].to_numpy()
similarity_matrix[row_idx, col_idx] = distances
similarity_matrix[col_idx, row_idx] = distances

# Optional distance -> similarity conversion, kept for reference; the exact
# transform would depend on the application:
# similarity_matrix = np.max(similarity_matrix) - similarity_matrix

# Sanity check:
# print(similarity_matrix)

# Column-wise standardization of the distance matrix prior to embedding.
similarity_matrix_normalized = StandardScaler().fit_transform(similarity_matrix)

# 2-D t-SNE embedding of the standardized matrix.
# NOTE(review): each row of the standardized distance matrix is treated as a
# feature vector here; if the DTW distances themselves should drive the
# embedding, TSNE(metric='precomputed') on the raw matrix may be what was
# intended — confirm.
X_tsne = TSNE(n_components=2, perplexity=45, n_iter=5000).fit_transform(
    similarity_matrix_normalized
)

# Earlier disabled attempts: PCA embedding and DBSCAN with several
# eps/min_samples settings.
# pca = PCA(n_components=2)
# X_pca = pca.fit_transform(similarity_matrix_normalized)
# dbscan = DBSCAN(eps=0.3, min_samples=10)
# clusters_dbscan_tsne = dbscan.fit_predict(X_tsne)
# clusters_dbscan_pca = dbscan.fit_predict(X_pca)

# OPTICS clustering of the embedded points; each label lines up positionally
# with the same index in all_mmsis (-1 marks noise).
clusters_optics_tsne = OPTICS(
    min_samples=20, xi=0.03, min_cluster_size=0.05
).fit_predict(X_tsne)
# clusters_optics_pca = OPTICS(
#     min_samples=20, xi=0.03, min_cluster_size=0.05
# ).fit_predict(X_pca)

# Plot the embedded points colored by cluster label so the different clusters
# (i.e. navigation patterns) can be inspected visually.
def plot_clusters(X_reduced, clusters, title, file_name):
    """Scatter-plot a 2-D embedding colored by cluster label and save results.

    Parameters
    ----------
    X_reduced : array of shape (n_samples, 2)
        Embedded coordinates, one row per MMSI in the global ``all_mmsis``.
    clusters : array of shape (n_samples,)
        Cluster label per point; -1 denotes noise and is drawn in black.
    title : str
        Plot title.
    file_name : str
        Basename for the saved PNG.

    Side effects: creates the output directory if needed, writes the cluster
    plot image and a CSV pairing each MMSI with its cluster label.
    """
    # Ensure the output directory exists.
    output_dir = "./Data/DTW/CSJ/T-SNE_Optics2"
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # One scatter layer per cluster label.
    plt.figure(figsize=(8, 6))
    unique_labels = np.unique(clusters)
    for label in unique_labels:
        # Noise (-1) is black; real clusters get evenly spaced jet colors.
        color = 'k' if label == -1 else plt.cm.jet(float(label) / np.max(unique_labels + 1))
        member_mask = (clusters == label)
        plt.scatter(X_reduced[member_mask, 0], X_reduced[member_mask, 1], color=color, s=10, label=f'Cluster {label}')
    plt.title(title)
    plt.xlabel('Component 1')
    plt.ylabel('Component 2')
    plt.legend(loc='best')

    # Save the figure, then release it so repeated calls don't accumulate
    # open matplotlib figures.
    image_path = os.path.join(output_dir, f"{file_name}_clusters.png")
    plt.savefig(image_path)
    plt.close()
    print(f"Cluster plot saved to {image_path}")

    # Save MMSI -> cluster assignments. BUG FIX: the original zipped the
    # global clusters_optics_tsne instead of the `clusters` argument, so any
    # call with different labels silently saved the wrong assignments.
    mmsi_cluster_pairs = list(zip(all_mmsis, clusters))
    mmsi_cluster_df = pd.DataFrame(mmsi_cluster_pairs, columns=['MMSI', 'Cluster'])
    print(mmsi_cluster_df.head())  # quick visual check of the pairing
    mmsi_cluster_df.to_csv(os.path.join(output_dir, "mmsi_cluster_pairs.csv"), index=False)

# Visualize the OPTICS clustering of the t-SNE embedding. Earlier runs also
# plotted the DBSCAN and PCA combinations; those calls remain disabled:
# plot_clusters(X_tsne, clusters_dbscan_tsne, 'Clusters found by DBSCAN with t-SNE',  'dbscan_tsne')
# plot_clusters(X_pca, clusters_optics_pca, 'Clusters found by OPTICS with PCA', 'optics_pca')
# plot_clusters(X_pca, clusters_dbscan_pca, 'Clusters found by DBSCAN with PCA', 'dbscan_pca')
plot_clusters(X_tsne, clusters_optics_tsne, 'Clusters found by OPTICS with t-SNE', 'optics_tsne')

# Generic dimensionality-reduction + clustering driver.
def perform_dimensionality_reduction_and_clustering(name, reducer, clusters):
    """Embed the normalized matrix with `reducer`, then run every clustering
    estimator in `clusters` (an iterable of (name, estimator) pairs) on the
    embedding, plotting and saving each result via plot_clusters."""
    embedded = reducer.fit_transform(similarity_matrix_normalized)
    for cluster_name, cluster_algo in clusters:
        labels = cluster_algo.fit_predict(embedded)
        plot_clusters(embedded, labels, f"Clusters found by {cluster_name} with {name}", f"{cluster_name}_{name}")

def calculate_silhouette(X, labels):
    """Compute the silhouette coefficient, ignoring noise points.

    Parameters
    ----------
    X : array of shape (n_samples, n_features)
        Data points (typically a reduced embedding).
    labels : array of shape (n_samples,)
        Cluster labels; -1 marks noise and is excluded.

    Returns
    -------
    float
        Silhouette score over non-noise points, or -1 when the score is
        undefined (all points are noise, fewer than two non-noise samples,
        or fewer than two distinct clusters remain).
    """
    labels = np.asarray(labels)
    # Exclude noise points.
    mask = labels != -1
    # silhouette_score requires >= 2 samples and >= 2 distinct labels;
    # return the existing -1 sentinel instead of letting it raise ValueError
    # in those degenerate cases.
    if np.sum(mask) < 2 or len(np.unique(labels[mask])) < 2:
        return -1
    return silhouette_score(np.asarray(X)[mask], labels[mask])

# --- Cluster-quality metrics for the OPTICS / t-SNE result ---
# Silhouette (noise excluded inside the helper), Davies-Bouldin (lower is
# better), and Calinski-Harabasz (higher is better).
silhouette_optics_tsne = calculate_silhouette(X_tsne, clusters_optics_tsne)
db_index_optics_tsne = davies_bouldin_score(X_tsne, clusters_optics_tsne)
chi_index_optics_tsne = calinski_harabasz_score(X_tsne, clusters_optics_tsne)

print(f"Silhouette Coefficient for OPTICS with t-SNE: {silhouette_optics_tsne}")
print(f"Davies-Bouldin Index for OPTICS with t-SNE: {db_index_optics_tsne}")
print(f"Calinski-Harabasz Index for OPTICS with t-SNE: {chi_index_optics_tsne}")

# Other embeddings/clusterings could be scored the same way, e.g.:
# silhouette_optics_pca = calculate_silhouette(X_pca, clusters_optics_pca)
# print(f"Silhouette Coefficient for OPTICS with PCA: {silhouette_optics_pca}")

# # Perform dimensionality reduction using Isomap
# isomap = Isomap(n_components=2, n_neighbors=50)
# X_isomap = isomap.fit_transform(similarity_matrix_normalized)
#
# # Clustering on Isomap reduced data
# clusters_dbscan_isomap = DBSCAN(eps=0.5, min_samples=10).fit_predict(X_isomap)
# # silhouette_dbscan_isomap = silhouette_score(X_isomap, clusters_dbscan_isomap)
# # print(f"Silhouette Coefficient for DBSCAN with Isomap: {silhouette_dbscan_isomap}")
#
# clusters_optics_isomap = OPTICS(min_samples=20, xi=0.005, min_cluster_size=0.03).fit_predict(X_isomap)
# silhouette_optics_isomap = silhouette_score(X_isomap, clusters_optics_isomap)
# print(f"Silhouette Coefficient for OPTICS with Isomap: {silhouette_optics_isomap}")
#
# # Perform dimensionality reduction using LLE
# lle = LocallyLinearEmbedding(n_components=2, n_neighbors=50, method='standard')
# X_lle = lle.fit_transform(similarity_matrix_normalized)
#
# # Clustering on LLE reduced data
# clusters_dbscan_lle = DBSCAN(eps=0.5, min_samples=10).fit_predict(X_lle)
# # silhouette_dbscan_lle = silhouette_score(X_lle, clusters_dbscan_lle)
# # print(f"Silhouette Coefficient for DBSCAN with LLE: {silhouette_dbscan_lle}")
#
# clusters_optics_lle = OPTICS(min_samples=20, xi=0.005, min_cluster_size=0.03).fit_predict(X_lle)
# silhouette_optics_lle = silhouette_score(X_lle, clusters_optics_lle)
# print(f"Silhouette Coefficient for OPTICS with LLE: {silhouette_optics_lle}")
#
# # Visualization of OPTICS clustering on Isomap-reduced data
# plot_clusters(X_isomap, clusters_optics_isomap, 'Clusters found by OPTICS with Isomap', 'optics_isomap')
#
# # Visualization of OPTICS clustering on LLE-reduced data
# plot_clusters(X_lle, clusters_optics_lle, 'Clusters found by OPTICS with LLE', 'optics_lle')
#
# # 使用MDS进行降维
# mds = MDS(n_components=2)
# X_mds = mds.fit_transform(similarity_matrix_normalized)
#
# # 使用OPTICS进行聚类
# optics_adjusted = OPTICS(min_samples=10, xi=0.005, min_cluster_size=0.02)
# clusters_optics_mds = optics_adjusted.fit_predict(X_mds)
# silhouette_optics_mds = silhouette_score(X_mds, clusters_optics_mds)
# print(f"Silhouette Coefficient for OPTICS with MDS: {silhouette_optics_mds}")