import random
import matplotlib.pyplot as plt
import numpy as np
import csv
from sklearn.decomposition import PCA

#------------------- Part 1: K-means clustering on CSV feature data
# Load data from CSV
def load_data1(file_path):
    """Read a numeric CSV file (header row skipped) into a float ndarray.

    Args:
        file_path: path to a CSV whose first row is a header and whose
            remaining rows are numeric values.

    Returns:
        2-D numpy array of floats, one row per data line.
    """
    with open(file_path, newline='') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # discard the header row
        rows = [[float(cell) for cell in row] for row in reader]
    return np.array(rows)


# Min-max normalization
def min_max_normalize(data):
    """Scale each column of `data` to the range [0, 1].

    Bug fix: for a constant column max == min, so the original divided by
    zero and produced NaN/inf. Such columns are now mapped to 0 (their
    numerator is 0, so substituting 1 for the zero range is safe).

    Args:
        data: (n_samples, n_features) numeric array.

    Returns:
        (norm_data, min_vals, max_vals): the scaled array plus the
        per-column minima and maxima used for the scaling.
    """
    min_vals = np.min(data, axis=0)
    max_vals = np.max(data, axis=0)
    ranges = max_vals - min_vals
    # Replace zero ranges with 1 to avoid division by zero; the affected
    # columns normalize to exactly 0 because data - min is 0 there.
    safe_ranges = np.where(ranges == 0, 1, ranges)
    norm_data = (data - min_vals) / safe_ranges
    return norm_data, min_vals, max_vals


# Euclidean distance
def euclidean_distance(a, b):
    """Return the Euclidean (L2) distance between vectors `a` and `b`."""
    return np.linalg.norm(a - b)


# K-means clustering
def k_means(data, k, max_iter=100):
    """K-means clustering (Lloyd's algorithm) with random sample initialization.

    Bug fix: when a cluster became empty, the original reset its centroid to
    the all-zeros vector (`new_centroids` was initialized with `np.zeros` and
    only overwritten for non-empty clusters), dragging the centroid to the
    origin. Empty clusters now keep their previous centroid. The assignment
    step is also vectorized instead of a per-sample Python loop.

    Args:
        data: (n_samples, n_features) array of points.
        k: number of clusters (must be <= n_samples).
        max_iter: maximum number of assignment/update iterations.

    Returns:
        (labels, centroids): integer label per sample and the
        (k, n_features) centroid array.
    """
    n_samples, n_features = data.shape
    # Initialize centroids from k distinct random samples.
    centroids = data[random.sample(range(n_samples), k)].astype(float)

    labels = np.zeros(n_samples, dtype=int)
    for _ in range(max_iter):
        # Assignment step: distance of every sample to every centroid at once.
        distances = np.linalg.norm(data[:, None, :] - centroids[None, :, :], axis=2)
        labels = np.argmin(distances, axis=1)

        # Update step: empty clusters retain their previous centroid.
        new_centroids = centroids.copy()
        for j in range(k):
            members = data[labels == j]
            if members.size:
                new_centroids[j] = members.mean(axis=0)

        # Converged when no centroid moved at all.
        if np.array_equal(centroids, new_centroids):
            break

        centroids = new_centroids

    return labels, centroids


# PCA for dimensionality reduction
def pca(data, n_components):
    """Project `data` onto its first `n_components` principal components.

    Delegates to sklearn's PCA; a hand-rolled SVD-based version could be
    substituted if the dependency is unwanted.
    """
    model = PCA(n_components=n_components)
    return model.fit_transform(data)


# Plot scatter plot
def plot_scatter(data, labels, k, iteration):
    """Scatter-plot 2-D points colored by cluster label.

    Args:
        data: (n_samples, 2) array, typically the PCA projection.
        labels: per-sample cluster index in [0, k).
        k: number of clusters to draw.
        iteration: run number, shown in the title.
    """
    palette = ['r', 'g', 'b', 'c', 'm', 'y', 'k']  # cycled if k exceeds it
    plt.figure()
    for cluster in range(k):
        members = data[labels == cluster]
        plt.scatter(members[:, 0], members[:, 1],
                    c=palette[cluster % len(palette)],
                    label=f'Cluster {cluster + 1}')
    plt.title(f'K={k}, Iteration={iteration}')
    plt.legend()
    plt.xlabel('PCA Component 1')
    plt.ylabel('PCA Component 2')
    plt.grid(True)
    plt.show()

# Driver for part 1: cluster the CSV feature data with k-means and plot.
data_path = 'data/clustering1.csv'
data = load_data1(data_path)

# Normalize all features to [0, 1]
norm_data, min_vals, max_vals = min_max_normalize(data)

# Cluster counts to try; cap on k-means iterations
ks = [2, 3, 5]
max_iter = 100


for idx, k in enumerate(ks):
    for iteration in range(2):
        # Run K-means (random initialization, so the two runs can differ)
        labels, centroids = k_means(norm_data, k, max_iter)

        # Reduce to 2 dimensions with PCA for plotting
        reduced_data = pca(norm_data, 2)

        # Visualize the clustering result
        plot_scatter(reduced_data, labels, k, iteration + 1)

#------------------- Part 2: DBSCAN on a graph adjacency matrix

# Load the adjacency-matrix data
def load_data2(file_path):
    """Load a graph adjacency matrix from a CSV file.

    Args:
        file_path: path to a CSV whose first row is a header and whose
            remaining rows are the integer adjacency-matrix entries.

    Returns:
        2-D numpy array of ints (the adjacency matrix).
    """
    with open(file_path, newline='') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)  # assume the first line is a header; skip it
        matrix = [[int(cell) for cell in row] for row in reader]
    return np.array(matrix)


# Build the core-object subset according to MinPts
def find_core_objects(adj_matrix, min_pts):
    """Return the indices of all core objects in the graph.

    A node is a core object when it has at least `min_pts` direct
    neighbours; self-loops (the diagonal) are excluded from the count.

    Args:
        adj_matrix: (n, n) 0/1 adjacency matrix.
        min_pts: minimum neighbour count for a node to be a core object.

    Returns:
        Set of core-object node indices.
    """
    # Row sums give degrees; subtract the diagonal to drop self-loops.
    degrees = adj_matrix.sum(axis=1) - np.diagonal(adj_matrix)
    return {node for node, degree in enumerate(degrees) if degree >= min_pts}


# DBSCAN clustering algorithm
def dbscan(adj_matrix, min_pts):
    """Graph DBSCAN over an adjacency matrix.

    A node is a core object when it has at least `min_pts` neighbours
    (self-loops excluded). Each cluster is grown from one core object by
    breadth-first search: every reachable neighbour joins the cluster;
    core neighbours are expanded further, border neighbours are labelled
    but not expanded. Nodes never reached stay noise (-1).

    Bug fix: the original loop marked neighbours visited and removed them
    from the core-object set, but the lines that labelled them and queued
    them for expansion were commented out — so every "cluster" contained
    only its seed node. The expansion is now performed.

    Args:
        adj_matrix: (n, n) 0/1 adjacency matrix.
        min_pts: minimum neighbour count for a core object.

    Returns:
        List of n integer cluster labels; -1 marks noise.
    """
    n_nodes = adj_matrix.shape[0]
    labels = [-1] * n_nodes
    cluster_id = 0

    # Core objects: degree (self-loops excluded) >= min_pts. Computed
    # inline (mirrors find_core_objects) so the function is self-contained.
    degrees = adj_matrix.sum(axis=1) - np.diagonal(adj_matrix)
    core_objects = {i for i in range(n_nodes) if degrees[i] >= min_pts}
    visited = set()

    while core_objects:
        # Start a new cluster from an arbitrary unprocessed core object.
        seed_node = core_objects.pop()
        visited.add(seed_node)
        labels[seed_node] = cluster_id
        queue = [seed_node]

        while queue:
            current_node = queue.pop(0)

            # Visit every unvisited direct neighbour of the current node.
            for neighbor, is_connected in enumerate(adj_matrix[current_node]):
                if is_connected != 1 or neighbor == current_node or neighbor in visited:
                    continue
                visited.add(neighbor)
                labels[neighbor] = cluster_id
                if neighbor in core_objects:
                    # Core neighbours keep growing the cluster; border
                    # points are labelled but not expanded.
                    core_objects.remove(neighbor)
                    queue.append(neighbor)

        cluster_id += 1

    return labels


# Compute a cluster's clustering coefficient (edge density)
def calculate_clustering_coefficient(adj_matrix, labels, cluster_id):
    """Return the edge density 2E / (n(n-1)) of one cluster's subgraph.

    Bug fix: `dbscan` returns a plain Python list, and the original
    compared `labels == cluster_id` directly — for a list that is a single
    `False`, so every cluster looked empty and the function always
    returned 0. Labels are converted to an ndarray first.

    Args:
        adj_matrix: (n, n) symmetric 0/1 adjacency matrix.
        labels: per-node cluster labels (list or array).
        cluster_id: cluster to measure.

    Returns:
        Edge density in [0, 1]; 0 when the cluster has fewer than 2 nodes.
    """
    labels = np.asarray(labels)
    cluster_indices = np.where(labels == cluster_id)[0]
    n_nodes_in_cluster = cluster_indices.size
    if n_nodes_in_cluster < 2:
        return 0  # density is undefined for < 2 nodes; report 0

    # Restrict the adjacency matrix to the cluster's nodes.
    cluster_adj_matrix = adj_matrix[np.ix_(cluster_indices, cluster_indices)]

    # Each undirected edge appears twice in the symmetric matrix.
    n_edges_in_cluster = np.sum(cluster_adj_matrix) // 2

    return (2 * n_edges_in_cluster) / (n_nodes_in_cluster * (n_nodes_in_cluster - 1))


# Driver for part 2: run DBSCAN for several MinPts values and report
# the number of clusters and each cluster's clustering coefficient.
data_path = 'data/clustering2.csv'
adj_matrix = load_data2(data_path)

min_pts_values = [10, 15, 20]
for min_pts in min_pts_values:
    labels = dbscan(adj_matrix, min_pts)
    # Noise points are labelled -1 and do not count as a cluster.
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    print(f"MinPts={min_pts}, 聚类数量={n_clusters}")

    clustering_coefficients = []
    # Cluster ids are assigned contiguously from 0 by dbscan.
    for cluster_id in range(n_clusters):
        coef = calculate_clustering_coefficient(adj_matrix, labels, cluster_id)
        clustering_coefficients.append(coef)
        print(f"  聚类 {cluster_id + 1}: 聚类系数={coef}")