import glob
import os
import re  # regex used to pull the numeric index out of filenames

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from kmeans_pytorch import kmeans, kmeans_predict
from sklearn.metrics import silhouette_score, calinski_harabasz_score, davies_bouldin_score
from sklearn.preprocessing import MinMaxScaler

# Collect every per-train processed CSV emitted by the preprocessing step.
csv_pattern = 'Train_processed_data/Train_PDW*_processed.csv'
files = glob.glob(csv_pattern)

# Sort key: numeric index embedded in a processed-CSV filename.
def extract_number_from_filename(filename):
    """Return the first integer found in the file's base name, or -1 if none.

    Only the base name is searched (``os.path.basename``) so that digits
    appearing in parent directory names cannot change the sort order.
    """
    match = re.search(r'\d+', os.path.basename(filename))
    return int(match.group()) if match else -1

# Order the CSVs by the numeric index embedded in their names.
files = sorted(files, key=extract_number_from_filename)

# Read every CSV and merge the selected columns into one DataFrame.
# Columns 1, 3, 4, 5, 6 (1-based) are kept. UTF-8 could not decode these
# files, so ISO-8859-1 is used instead.
frames = []
for file in files:
    try:
        temp_data = pd.read_csv(file, usecols=[0, 2, 3, 4, 5], encoding='ISO-8859-1')
        print(f"Reading file: {file}, shape: {temp_data.shape}")
        frames.append(temp_data)
    except UnicodeDecodeError:
        print(f"Failed to read {file} with ISO-8859-1 encoding.")
        # Other encodings such as 'latin1' or 'cp1252' could be tried here.

# Concatenate once after the loop instead of growing a DataFrame inside it,
# which is O(n^2) in total rows; an empty file list still yields an empty frame.
data = pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()

# Feature matrix taken from the merged table.
X = data.values

# Normalise every feature into [0, 1] with MinMaxScaler.
scaler = MinMaxScaler()
X_scaled = scaler.fit_transform(X)

# Hand the scaled features to PyTorch as a float32 tensor.
X_tensor = torch.tensor(X_scaled, dtype=torch.float32)

# Cluster with kmeans-pytorch: 50 clusters, Euclidean distance,
# convergence tolerance 1e-4, on the GPU when one is available.
num_clusters = 50
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

cluster_ids_x, cluster_centers = kmeans(
    X=X_tensor,
    num_clusters=num_clusters,
    distance='euclidean',
    tol=1e-4,
    device=device,
)

# Per-sample cluster assignments, then the cluster centres.
print(cluster_ids_x)
print(cluster_centers)

# Visualize the clustering result (plots only the first two features)
# plt.figure(figsize=(4, 3), dpi=160)
# plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=cluster_ids_x, cmap='cool')

# Plot the cluster centers
# plt.scatter(
#     cluster_centers[:, 0], cluster_centers[:, 1],
#     c='yellow', alpha=0.6, edgecolors='black', linewidths=2
# )

# plt.title('Clusters of Customers')
# plt.xlabel('Feature 1')
# plt.ylabel('Feature 2')
# plt.tight_layout()
# plt.show()

# Compute and print evaluation metrics (using sklearn's scoring functions)
# silhouette_avg = silhouette_score(X_scaled, cluster_ids_x.numpy())
# print(f'Silhouette Score: {silhouette_avg}')

# ch_score = calinski_harabasz_score(X_scaled, cluster_ids_x.numpy())
# print(f'Calinski-Harabasz Score: {ch_score}')

# db_score = davies_bouldin_score(X_scaled, cluster_ids_x.numpy())
# print(f'Davies-Bouldin Score: {db_score}')

# # WCSS（Within-Cluster Sum of Squares）
# wcss = torch.sum((X_tensor - cluster_centers[cluster_ids_x]) ** 2).item()
# print(f'WCSS (Within-Cluster Sum of Squares): {wcss}')
