import glob
import os
import re

import pandas as pd
import torch
from kmeans_pytorch import kmeans
from sklearn.preprocessing import MinMaxScaler

# Parse the target file and derive the number of clusters for every scene.
def get_cluster_count_from_target_file(target_file='Train_Taget.txt'):
    """Parse the scene/radar target file and return ``{scene_num: k}``.

    Each line has the form ``场景<N>：雷达1,雷达2,...``; the cluster count
    ``k`` for a scene is the number of *distinct* radar type numbers listed
    on that line. Lines that do not match the pattern are ignored.

    Args:
        target_file: Path to the target description file
            (note: 'Train_Taget.txt' is the actual on-disk filename, typo
            included — do not "fix" it here).

    Returns:
        dict mapping scene number (int) to cluster count (int).
    """
    scene_clusters = {}
    # NOTE(review): uses the platform default encoding; the file contains
    # Chinese text — confirm it is UTF-8, or pass encoding= explicitly.
    with open(target_file, 'r') as f:
        for line in f:
            match = re.match(r'场景(\d+)：(.*)', line.strip())
            if not match:
                continue
            scene_num = int(match.group(1))  # scene number
            radar_entries = match.group(2).split(',')
            # Collect distinct radar type numbers. Entries without digits
            # (e.g. the empty string produced by a trailing comma) are
            # skipped instead of raising AttributeError on a failed search.
            unique_radar_types = set()
            for radar in radar_entries:
                digit_match = re.search(r'\d+', radar)
                if digit_match:
                    unique_radar_types.add(digit_match.group(0))
            # Cluster count = number of distinct radar types in the scene.
            scene_clusters[scene_num] = len(unique_radar_types)
    return scene_clusters

# Map scene number -> cluster count, parsed from the target file
# ('Train_Taget.txt' is the actual on-disk filename, typo included).
scene_clusters = get_cluster_count_from_target_file('Train_Taget.txt')

# Collect every per-scene processed PDW CSV file.
files = glob.glob('Train_processed_data/Train_PDW*_processed.csv')

# Sort key helper: order files by the number embedded in their name.
def extract_number_from_filename(filename):
    """Return the first integer found in *filename*, or -1 if there is none."""
    digits = re.search(r'\d+', filename)
    if digits is None:
        return -1
    return int(digits.group(0))

# Sort files by the scene number embedded in the filename so processing
# (and console output) follows scene order.
files.sort(key=extract_number_from_filename)

# Directory the clustered CSVs are written to.
output_folder = 'Train_output_devide'

# Create the output directory if it does not already exist.
# (`os` is imported at the top of the file.)
os.makedirs(output_folder, exist_ok=True)

# Process each scene file: normalize the features, cluster them with KMeans,
# and save a copy of the data with an extra cluster-id column.
for file in files:
    try:
        # Scene number = first run of digits in the path (the data folder
        # name contains no digits, so this is the number in the filename).
        # Guard against paths with no digits, which would otherwise raise
        # AttributeError on `.group(0)` of a None match.
        scene_match = re.search(r'\d+', file)
        if scene_match is None:
            print(f"Warning: could not extract a scene number from {file}; skipping.")
            continue
        scene_num = int(scene_match.group(0))

        # Cluster count for this scene; default to k=3 when unknown.
        if scene_num in scene_clusters:
            num_clusters = scene_clusters[scene_num]
            print(f"场景{scene_num}的聚类数为：{num_clusters}")
        else:
            print(f"Warning: Scene {scene_num} not found in Train_Taget.txt. Defaulting to k=3.")
            num_clusters = 3

        # Read only the feature columns (column 1 is intentionally skipped).
        # ISO-8859-1 because the original dataset is not UTF-8 encoded.
        temp_data = pd.read_csv(file, usecols=[0, 2, 3, 4, 5], encoding='ISO-8859-1')
        print(f"Reading file: {file}, shape: {temp_data.shape}")

        # Min-max normalize all features so each contributes comparably to
        # the Euclidean distances used by KMeans.
        X = temp_data.values
        scaler = MinMaxScaler()
        X_scaled = scaler.fit_transform(X)

        # KMeans (kmeans_pytorch) operates on a float32 tensor; run on GPU
        # when available, CPU otherwise.
        X_tensor = torch.tensor(X_scaled, dtype=torch.float32)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        cluster_ids_x, cluster_centers = kmeans(
            X=X_tensor, num_clusters=num_clusters, distance='euclidean', tol=1e-4, device=device
        )

        # Append the cluster labels as a new column. `.cpu()` is a no-op for
        # CPU tensors but required before `.numpy()` if the ids live on GPU.
        temp_data['cluster_ids_x_devide'] = cluster_ids_x.cpu().numpy()

        # Save the labelled file under the same name in the output folder.
        output_file = os.path.join(output_folder, os.path.basename(file))
        temp_data.to_csv(output_file, index=False)
        print(f"Saved clustered file: {output_file}")

    except UnicodeDecodeError:
        # Best-effort: report the failure and continue with the next file.
        # Other encodings such as 'latin1' or 'cp1252' could be tried here.
        print(f"Failed to read {file} with ISO-8859-1 encoding.")