import os,shutil
from warnings import simplefilter
simplefilter(action='ignore',category=FutureWarning)
import numpy as np
import math
from keras.callbacks import ReduceLROnPlateau, LambdaCallback
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot#,savefig
import time
import keras
from keras.models import Model#,Sequential
from keras.optimizers import Adam
from keras.layers import Dense,Dropout,BatchNormalization,GRU,Lambda
from sklearn import preprocessing
import random as rd
import tensorflow as tf
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, adjusted_rand_score
from sklearn.metrics import silhouette_score, davies_bouldin_score, calinski_harabasz_score
import numpy as np
from collections import defaultdict
import logging
from collections import Counter
from matplotlib.lines import Line2D
from keras.callbacks import Callback, LearningRateScheduler
# Cap GPU memory so this process does not grab the whole card.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
  try:
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*4)])  # limit GPU 0 to 4 GB
    logical_gpus = tf.config.experimental.list_logical_devices('GPU')
    print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
  except RuntimeError as e:
    print(e)  # virtual-device config must happen before GPUs are initialised
tf.keras.backend.clear_session()  # drop any state left over from a previous Keras session
import keras.backend as K
from keras.layers import Input
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
from numpy.linalg import eig
import io
import base64
import re
from sklearn.preprocessing import LabelEncoder
def initcenter(x, k):
    """Pick k distinct random sample indices to serve as initial medoids.

    x: sample collection (only its length is used).
    k: number of centres; must satisfy k <= len(x).
    Returns a list of k unique indices in [0, len(x)).

    Fixes an off-by-one in the original, which drew from range(len(x)-1)
    and therefore could never choose the last sample as a centre; also
    replaces the rejection loop with a single replace=False draw.
    """
    # Reseed from the sub-second part of the clock so repeated restarts differ.
    frac, _ = math.modf(time.time())
    np.random.seed(int(frac * 1000))
    # Draw k distinct indices in one shot (no duplicate centres).
    return list(np.random.choice(len(x), size=k, replace=False))
def find_optimal_k(data, k_range, parameters):
    """Sweep k_range with KMeans and return the k with the best silhouette score.

    NOTE(review): silhouette_score is called with metric='precomputed', so
    `data` is presumably a square distance matrix — confirm against callers.
    Also plots score-vs-k on the current matplotlib figure as a side effect.
    Returns None when no k beats the -1 floor (or k_range is empty).
    """
    # Read but currently unused: interactive preview is disabled below.
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    best_score = -1
    best_k = None
    scores = []
    for k in k_range:
        labels = KMeans(n_clusters=k, random_state=42).fit_predict(data)
        score = silhouette_score(data, labels, metric='precomputed')
        scores.append(score)
        if score > best_score:
            best_score = score
            best_k = k

    plt.plot(k_range, scores, 'bx-')
    plt.xlabel('Number of Clusters (K)')
    plt.ylabel('Silhouette Score')
    plt.title('Silhouette Score for Each K')
    return best_k
def nearest(kc, i, k, disjz):
    """Return the index (0..k-1) of the centre in kc closest to sample i.

    kc: sequence of k centre indices into disjz.
    disjz: precomputed pairwise distance matrix.
    Ties resolve to the first (lowest-index) closest centre.
    """
    dists = [disjz[kc[n], i] for n in range(k)]
    return np.argmin(np.asarray(dists))

def xclassify(x, y, kc, k, disjz):
    """Assign every sample in x to its nearest centre.

    Writes the cluster index into y IN PLACE (y is also returned for
    convenience). x holds sample indices into disjz; kc holds the k centres.
    """
    for pos, sample in enumerate(x):
        y[pos] = nearest(kc, sample, k, disjz)
    return y

def newcenten(m, ycen, disjz):
    """Pick one cluster's new medoid: the member minimising its summed distance
    to all other members.

    m: tuple as returned by np.where(y == c); its last element is the 1-D
       array of member indices.
    ycen: current centre, returned unchanged when the cluster is empty.
    disjz: pairwise distance matrix.

    Replaces the original's obscure `for item in m: continue` (which relied on
    the loop variable surviving the loop to grab the tuple's last element, and
    would NameError on an empty tuple) with a direct index.
    """
    members = m[-1]  # np.where returns a 1-tuple; take the index array
    cen = ycen
    best = float('inf')
    for i in members:
        # Summed distance from candidate i to every member (including itself: 0).
        distance = 0
        for n in members:
            distance = distance + disjz[i, n]
        if best > distance:
            best = distance
            cen = i
    return cen

def kcmean(x, y, kc, k, disjz):
    """Recompute each cluster's medoid from the current assignment y.

    Returns (new_centres_array, changed) where changed is True when at least
    one centre moved — the caller uses it as the convergence flag.
    """
    centres = list(kc)
    changed = False
    for c in range(k):
        members = np.where(y == c)
        candidate = newcenten(members, centres[c], disjz)
        if centres[c] != candidate:
            centres[c] = candidate
            changed = True  # a centre moved: keep iterating
    return (np.array(centres), changed)
  
def deifen(y, disjz):
    """Mean silhouette coefficient of the clustering y over precomputed distances.

    y: 1-D array of cluster labels, one per sample.
    disjz: square pairwise distance matrix.
    Returns the mean per-sample silhouette; a sample alone in its cluster
    contributes 0. Raises ValueError (via min([])) if only one cluster exists,
    after printing y — preserved from the original.

    Cleanups: set(y) is hoisted out of the per-sample loop (it is invariant),
    and the obscure `for item in m: continue` tuple-unpacking is replaced
    with direct indexing.
    """
    labels = set(y)  # invariant across samples; was rebuilt every iteration
    s = []
    for i in range(len(disjz)):
        a = 0    # mean distance to own cluster
        b = []   # mean distance to each other cluster
        for lab in labels:
            members = np.where(y == lab)[0]
            distance = 0  # summed distance from i to this cluster
            for j in members:
                distance = distance + disjz[i, j]
            distance = distance / len(members)
            if (members == i).any():   # i's own cluster -> a
                a = distance
            else:                      # foreign cluster -> candidate for b
                b.append(distance)
        if b == []:
            print(y)  # debug trail: only one cluster present; min() below raises
        b = min(b)
        if a == 0:    # singleton cluster: silhouette defined as 0
            s.append(0)
        else:
            s.append((b - a) / max([a, b]))
    return np.mean(s)

def julei(k, x, yi, disjz):
    """Medoid k-clustering: 10 random restarts, keep the best-scoring run.

    k: number of clusters.
    x: sample indices (0..len(disjz)-1) standing in for the trajectories.
    yi: label array reused across restarts (mutated in place by xclassify).
    disjz: pairwise distance matrix.
    Returns (clusters, centres) where clusters is a list of k index lists.

    Fix: the silhouette score (deifen) was evaluated twice per restart —
    once for the comparison and once for the assignment; it is now computed
    once and reused.
    """
    best = -float('inf')
    yrr = None
    kcr = None
    for _ in range(10):
        kc = initcenter(x, k)
        y = yi
        flag = True
        count = 1
        while flag:
            count = 1 + count
            y = xclassify(x, y, kc, k, disjz)        # assign each sample to a centre
            kc, flag = kcmean(x, y, kc, k, disjz)    # flag False once centres stop moving
            print(flag)
            if count > 10:                           # hard cap of 10 refinement sweeps
                flag = False
                print('z')
        score = deifen(y, disjz)  # hoisted: was computed twice
        if score > best:
            yrr = y.copy()
            best = score
            kcr = kc
    # Invert the flat label array into per-cluster index lists.
    b = []
    for j in range(k):
        cluster = []
        for i in range(len(disjz)):
            if yrr[i] == j:
                cluster.append(i)
        b.append(cluster)
    return b, kcr
def demo(lst, k):
    """Rotate the sequence left by k positions and return the result."""
    head, tail = lst[:k], lst[k:]
    return tail + head
def sgn(x):
    """Trapezoid colour-ramp component on [0, 1].

    0 on [0, 1/3), a rising ramp 6x-2 on [1/3, 1/2), 1 on [1/2, 5/6),
    and a falling ramp -6x+6 on [5/6, 1]. Returns None outside [0, 1]
    (preserved from the original).
    """
    if 0 <= x < 1 / 3:
        return 0
    if 1 / 2 <= x < 5 / 6:
        return 1
    if 1 / 3 <= x < 1 / 2:
        return 6 * x - 2
    if 5 / 6 <= x <= 1:
        return -6 * x + 6
    return None
def guiyi(disjz):
    """Min-max normalise a whole matrix to [0, 1] using its global min and max.

    Equivalent to the original (flatten to one column, MinMaxScaler, reshape
    back) but without the sklearn round-trip: with a single feature column the
    scaler reduces to (x - min) / (max - min). A constant matrix maps to all
    zeros, matching MinMaxScaler's zero-range handling.
    """
    lo = disjz.min()
    rng = disjz.max() - lo
    if rng == 0:
        rng = 1  # degenerate constant matrix -> all zeros (as MinMaxScaler does)
    return (disjz - lo) / rng
def pca(X, k):
    """Project X (n_samples, n_features) onto its top-k principal components.

    Fixes the original row/column mix-up: np.linalg.eig returns eigenvectors
    as the COLUMNS of its second result, so the top-k vectors must be selected
    with `eigenvectors[:, idx]` — indexing by row selected the wrong vectors.
    Returns an (n_samples, k) array of projections.
    """
    X = X - X.mean(axis=0)                 # centre each feature
    X_cov = np.cov(X.T, ddof=0)            # feature covariance matrix
    eigenvalues, eigenvectors = eig(X_cov)
    klarge_index = eigenvalues.argsort()[-k:][::-1]   # indices of the k largest eigenvalues
    k_eigenvectors = eigenvectors[:, klarge_index]    # columns, not rows
    return np.dot(X, k_eigenvectors)
def load_data(parameters):
    """Load each .npy trajectory listed in parameters["paths"].

    Every trajectory is zero-padded (rows of [0, 0, 0]) up to
    parameters["length"] points, then trimmed to exactly that length.
    Files whose label cannot be extracted or whose shape is not (N, 3)
    are reported and skipped. Returns a list of [path, label, data] triples.
    """
    length = parameters["length"]
    loaded = []
    for path in parameters["paths"]:
        print(f"Processing path: {path}")
        try:
            label = extract_label(path)
            data = np.load(path)  # .npy / .npz file
            if len(data) < length:
                padding = [[0 for _ in range(3)]] * (length - len(data))
                data = np.array(list(data) + padding)
            data = data[0:length, :]
            print(f"Data shape: {data.shape}")
            if data.ndim != 2 or data.shape[1] != 3:
                raise ValueError(f"Data shape {data.shape} is not valid. Expected a 2D array with 3 features per point.")
            loaded.append([path, label, data])
        except ValueError as e:
            # Skip unusable files but keep processing the rest.
            print(e)
            continue
    return loaded
class lstm_model:
    """Two-layer GRU encoder mapping a (length, cc) sequence to a 20-dim embedding."""

    def __init__(self, length, cc):
        # First GRU keeps the whole sequence so the second GRU can consume it.
        self.block1 = GRU(40, return_sequences=True, input_shape=[length, cc],
                          name='c1', recurrent_activation='sigmoid')
        # Second GRU returns only its final state: the embedding.
        self.block2 = GRU(20, name='d1', recurrent_activation='sigmoid')

    def call(self, inputs):
        hidden = self.block1(inputs)
        return self.block2(hidden)
class delstm_model:
    """GRU decoder mapping a 20-dim embedding back to a (length, cc) sequence."""

    def __init__(self, length, cc):
        # Tile the embedding across all time steps before decoding.
        self.block0 = RepeatVector(length)
        self.block1 = GRU(20, return_sequences=True, name='e1', recurrent_activation='sigmoid')
        self.block2 = GRU(40, return_sequences=True, name='f1', recurrent_activation='sigmoid')
        # Per-time-step projection back to cc features.
        self.block3 = TimeDistributed(Dense(cc, name='g1'))

    def call(self, inputs):
        seq = self.block0(inputs)
        seq = self.block1(seq)
        seq = self.block2(seq)
        return self.block3(seq)
#-------------------------#
#   Build the siamese network
#-------------------------#
def siamese(input_shape, length, cc):
    """Build the twin GRU autoencoder.

    Both inputs share one encoder and one decoder (weight sharing). The model
    maps [input_1, input_2] to [reconstruction_1, reconstruction_2, distance]
    where distance is the mean absolute difference of the two embeddings
    (batch-normalised and dropped out), shaped (batch, 1).
    """
    encoder = lstm_model(length, cc)
    decoder = delstm_model(length, cc)

    input_1 = Input(shape=input_shape)
    input_2 = Input(shape=input_shape)

    emb_1 = encoder.call(input_1)  # shared weights: the same encoder serves both inputs
    emb_2 = encoder.call(input_2)
    recon_1 = decoder.call(emb_1)
    recon_2 = decoder.call(emb_2)

    diff = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))([emb_1, emb_2])
    diff = BatchNormalization()(diff)
    diff = Dropout(0.1)(diff)
    score = Lambda(lambda tensors: tf.reduce_mean(tensors, axis=1, keepdims=True))(diff)

    return Model([input_1, input_2], [recon_1, recon_2, score])
def extract_label(path):
    """Pull the flight label out of a path like .../Variflight_<LABEL>_YYYYMMDD.xlsN.npy.

    Raises ValueError when the path does not match the expected pattern.
    """
    pattern = r'Variflight_(\w+)_\d{8}\.xls\d+\.npy$'
    found = re.search(pattern, path)
    if found is None:
        raise ValueError(f"无法从路径 {path} 中提取标签")
    return found.group(1)
def data_plot(parameters, result_1, a, width):
    """Render the clustered trajectories in one figure, one colour per cluster.

    parameters: options dict; "__ENABLE_DEBUG_PREVIEW__" shows the figure interactively.
    result_1: dict with "selected_datas" = list of [path, label, data] triples.
    a: list of clusters, each a list of sample indices into selected_datas.
    width: number of leading feature columns to keep from each trajectory.
    Returns the PNG rendering base64-encoded as a str.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    k = len(a)  # number of clusters
    paths = []
    datas = result_1["selected_datas"]
    for item in datas:
        path = item[0]
        paths.append(path) 
    y = np.zeros(len(paths), dtype=int)

    # Invert the cluster index lists into one flat label-per-sample array.
    for i in range(len(a)):
        for j in range(len(a[i])):
            y[a[i][j]] = i

    # RGB colour ramp: three phase-shifted copies of the sgn() trapezoid wave.
    t = np.arange(0, 0.99, 0.01)
    yan = [sgn(i) for i in t]
    yan1 = demo(yan, 33)  # red channel
    yan2 = demo(yan, 66)  # green channel
    yan3 = yan           # blue channel

    # Fresh figure so nothing from a previous plot leaks in.
    fig = plt.figure()
    ax = fig.gca()
    fig.set_size_inches(10, 6)

    legend_elements = []  # proxy artists for the legend
    for c in range(k):
        m = np.where(y == c)
        cc = (yan1[math.floor(c / (k + 1) * 99)], yan2[math.floor(c / (k + 1) * 99)], yan3[math.floor(c / (k + 1) * 99)])

        for i in m[0]:  # iterate the sample indices of cluster c
            dfc = datas[i][2][0:300, 0:width]  # trajectory points (capped at 300)
            # Drop all-zero rows (padding added during loading).
            valid_points = dfc[~np.all(dfc == 0, axis=1)]
            if valid_points.size > 0:
                ax.plot(valid_points[:, 1], valid_points[:, 2], linewidth=0.5, color=cc, alpha=0.8)  # draw the track

        # One legend entry per cluster.
        legend_elements.append(Line2D([0], [0], color=cc, lw=4, label=f'Cluster {c}'))

    # Attach the legend.
    ax.legend(handles=legend_elements)

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    if enable_debug_preview:
        plt.show()
    image_base64_julei = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_julei
# 定义函数
def map_clusters_to_labels(true_labels, cluster_labels):
    """Map each cluster id to the majority true label among its members.

    true_labels: array of ground-truth labels, aligned with cluster_labels.
    cluster_labels: array of cluster ids from the clustering step.
    Returns (mapped_labels, cluster_to_label): the per-sample array of
    majority labels, and the cluster-id -> label dict (empty clusters map
    to -1).

    Fix: removed a leftover debug print of every cluster's label slice.
    """
    cluster_to_label = {}
    for cluster_id in np.unique(cluster_labels):
        # True labels of all samples that fell into this cluster.
        labels_in_cluster = true_labels[cluster_labels == cluster_id]
        if len(labels_in_cluster) > 0:
            # Most frequent true label wins the cluster.
            cluster_to_label[cluster_id] = Counter(labels_in_cluster).most_common(1)[0][0]
        else:
            cluster_to_label[cluster_id] = -1  # empty cluster

    # Re-express every sample through its cluster's majority label.
    mapped_labels = np.array([cluster_to_label.get(label, -1) for label in cluster_labels])

    return mapped_labels, cluster_to_label

def ensure_fixed_length(data, length, fill_value=0):
    """Trim or pad an (n_points, 3) array to exactly `length` rows.

    data: input (n_points, 3) NumPy array.
    length: target row count.
    fill_value: value used for padding rows (default 0).
    Returns the fixed-length array (a slice when trimming, a new array when padding).
    """
    n = len(data)
    if n < length:
        tail = np.full((length - n, 3), fill_value)
        return np.vstack((data, tail))
    return data[:length]

def calculate_silhouette_coefficient(datas, fixed_length=None):
    """Silhouette coefficient of the labelling carried by `datas`.

    datas: list of [file_path, label, features] triples; features is an
        (n_points, 3) array.
    fixed_length: pad/trim every feature array to this many rows; defaults
        to the longest sample present.
    Returns the silhouette score over the flattened feature vectors.
    Raises ValueError when fewer than two distinct labels are present.
    """
    if fixed_length is None:
        # No target length given: use the longest sample.
        fixed_length = max(item[2].shape[0] for item in datas)

    features = np.array([ensure_fixed_length(item[2], fixed_length) for item in datas])
    labels = np.array([item[1] for item in datas])

    if len(set(labels)) < 2:
        raise ValueError("轮廓系数需要至少两个簇来计算")

    flat = features.reshape(len(features), -1)  # sklearn wants 2-D input
    return silhouette_score(flat, labels)

def calculate_davies_bouldin_index(datas, fixed_length=None):
    """Davies-Bouldin index (lower is better) of the labelling carried by `datas`.

    datas: list of [file_path, label, features] triples; features is an
        (n_points, 3) array.
    fixed_length: pad/trim every feature array to this many rows; defaults
        to the longest sample present.
    Returns the Davies-Bouldin score over the flattened feature vectors.
    """
    if fixed_length is None:
        # No target length given: use the longest sample.
        fixed_length = max(item[2].shape[0] for item in datas)

    features = np.array([ensure_fixed_length(item[2], fixed_length) for item in datas])
    labels = np.array([item[1] for item in datas])

    flat = features.reshape(len(features), -1)  # sklearn wants 2-D input
    return davies_bouldin_score(flat, labels)

def calculate_calinski_harabasz_index(datas, fixed_length=None):
    """Calinski-Harabasz index (higher is better) of the labelling in `datas`.

    datas: list of [file_path, label, features] triples; features is an
        (n_points, 3) array.
    fixed_length: pad/trim every feature array to this many rows; defaults
        to the longest sample present.
    Returns the Calinski-Harabasz score over the flattened feature vectors.
    """
    if fixed_length is None:
        # No target length given: use the longest sample.
        fixed_length = max(item[2].shape[0] for item in datas)

    features = np.array([ensure_fixed_length(item[2], fixed_length) for item in datas])
    labels = np.array([item[1] for item in datas])

    flat = features.reshape(len(features), -1)  # sklearn wants 2-D input
    return calinski_harabasz_score(flat, labels)

def average_dicts(dict_list):
    """Key-wise mean over a list of dicts.

    A key missing from some dicts is averaged only over the dicts that
    contain it. dict_list must be non-empty (the first dict seeds the keys).
    Returns {key: mean_value}.
    """
    totals = dict.fromkeys(dict_list[0], 0)
    counts = dict.fromkeys(dict_list[0], 0)

    for d in dict_list:
        for key, value in d.items():
            if key not in totals:
                # First sighting of a key absent from the first dict.
                totals[key] = 0
                counts[key] = 0
            totals[key] += value
            counts[key] += 1

    return {key: totals[key] / counts[key] for key in totals}
def save_clusters_to_files(out_path, julei_datas):
    """Write clustered trajectories to out_path/cluster_<label>/<basename>.npy.

    out_path: output directory (created if missing); any pre-existing
        cluster_* subfolders are deleted first.
    julei_datas: iterable of (path, label, data) triples.
    """
    # Make sure the output root exists.
    if not os.path.exists(out_path):
        os.makedirs(out_path)

    # Bucket the samples by cluster label.
    buckets = defaultdict(list)
    for path, label, data in julei_datas:
        buckets[label].append((os.path.basename(path), data))

    # Wipe cluster folders left over from a previous run.
    for entry in os.listdir(out_path):
        folder = os.path.join(out_path, entry)
        if entry.startswith('cluster_') and os.path.isdir(folder):
            try:
                shutil.rmtree(folder)
                print(f"已删除旧的聚类文件夹 {folder}")
            except Exception as e:
                print(f"删除旧的聚类文件夹 {folder} 时出错: {e}")

    # Write each cluster into its own folder.
    for label, data_list in buckets.items():
        cluster_folder = os.path.join(out_path, f'cluster_{label}')
        os.makedirs(cluster_folder, exist_ok=True)  # exist_ok avoids races
        for file_name, data in data_list:
            np.save(os.path.join(cluster_folder, file_name), data)

    print(f"聚类结果已保存到 {out_path}")
def add_average_to_dict(data_dict, weight):
    """Weighted sum of data_dict values over the keys it shares with weight.

    NOTE(review): despite the name, this computes a weighted SUM (not a mean),
    and it mutates data_dict in place by storing the result under "average".
    Returns (data_dict, result).
    """
    total = 0
    for key, value in data_dict.items():
        if key in weight:
            total += value * weight[key]

    data_dict["average"] = total
    return data_dict, total
def data_julei(result_1, a):
    """Turn cluster index lists into [path, cluster_label, data] triples.

    result_1: dict with "selected_datas" = list of [path, label, data].
    a: list of clusters, each a list of indices into the paths.
    Re-loads each trajectory from disk, keeping the first 3 feature columns.
    """
    width = 3  # number of feature columns kept
    paths = [item[0] for item in result_1["selected_datas"]]

    # Invert the cluster index lists into one flat label-per-path array.
    labels = np.zeros(len(paths), dtype=int)
    for cluster_id, members in enumerate(a):
        for idx in members:
            labels[idx] = cluster_id

    datas = []
    for i, path in enumerate(paths):
        df = np.load(path)[:, 0:width]
        datas.append([path, labels[i], df])

    return datas
def plot_dual_bars(f1_scores, accuracies, true_labels, parameters, title='聚类性能'):
    """Grouped bar chart of per-label purity (f1_scores) and accuracy.

    f1_scores / accuracies: dicts keyed by label. NOTE(review): both dicts
    are updated IN PLACE with 0 entries for labels present in true_labels
    but missing from them — confirm callers don't rely on them staying intact.
    true_labels: iterable of all expected labels.
    parameters: options dict; "__ENABLE_DEBUG_PREVIEW__" is read but the
    interactive preview is currently disabled.
    Returns the chart PNG base64-encoded as a str.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)

    # Labels that already have scores.
    labels = list(f1_scores.keys())

    set1 = set(true_labels)
    set2 = set(labels)

    # Labels expected but missing from the scores get a zero-height bar.
    missing_elements = set1 - set2
    list_missing_elements = list(missing_elements)
    result_dict = {key: 0 for key in list_missing_elements}

    labels = list_missing_elements + labels

    # Merge the zero entries into both dicts (in-place mutation, see note above).
    f1_scores.update(result_dict)
    accuracies.update(result_dict)

    f1_values = [f1_scores[label] for label in labels]
    accuracy_values = [accuracies[label] for label in labels]

    # Bar positions and width.
    x = np.arange(len(labels))  # one slot per label
    width = 0.35  # width of each bar

    fig, ax = plt.subplots()

    # Purity bars (solid black) and accuracy bars (white with black edge).
    rects1 = ax.bar(x - width/2, f1_values, width, label='纯度', color='black')
    rects2 = ax.bar(x + width/2, accuracy_values, width, label='准确率', color='white', edgecolor='black')

    # Axis labelling.
    ax.set_ylabel('性能')
    ax.set_title(title)
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend()

    # Rotate the x labels to avoid overlap.
    plt.xticks(rotation=45)

    # Leave headroom above the bars for the value annotations.
    ax.set_ylim(0, 1.2)

    def autolabel(rects, ax):
        """Attach a text label above each bar in *rects*, displaying its height with two decimal places."""
        for rect in rects:
            height = rect.get_height()
            ax.annotate(f'{height:.2f}',  # two decimal places
                        xy=(rect.get_x() + rect.get_width() / 2, height),
                        xytext=(0, 3),  # 3 points vertical offset
                        textcoords="offset points",
                        ha='center', va='bottom', fontsize=8)

    autolabel(rects1, ax)
    autolabel(rects2, ax)

    plt.tight_layout()

    # Render to PNG in memory...
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)

    # ...and hand it back base64-encoded.
    image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')

    plt.close()
    return image_base64
def select_data(parameters, origin_datas):
    """Take up to parameters[label] samples of each known flight, in input order.

    parameters: dict holding a per-flight-label quota (KeyError if one of the
        known labels is missing, matching the original behaviour).
    origin_datas: iterable of (path, label, data) triples.
    Returns the selected [path, label, data] triples; samples with unknown
    labels are skipped.

    Cleanup: the nine copy-pasted `num_XXX = parameters["XXX"]` extractions
    are replaced by a single dict comprehension over the fixed label set.
    """
    # Fixed set of flight labels this pipeline knows about.
    flight_labels = (
        "CCA1919", "CES2360", "CES2551",
        "CHH7303", "CHH7426", "CHH7695",
        "CSN3371", "CSN6568", "CSN6761",
    )
    # Remaining quota per label.
    remaining_counts = {label: parameters[label] for label in flight_labels}

    selected_datas = []
    for path, label, data in origin_datas:
        if remaining_counts.get(label, 0) > 0:
            selected_datas.append([path, label, data])
            remaining_counts[label] -= 1

    return selected_datas
def origin_data_plot(parameters, selected_datas, width):
    """Plot the raw (pre-clustering) trajectories, one colour per true label.

    parameters: options dict; "__ENABLE_DEBUG_PREVIEW__" shows the figure interactively.
    selected_datas: list of [path, label, data] triples.
    width: number of leading feature columns to keep from each trajectory.
    Returns the PNG rendering base64-encoded as a str.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    datas = selected_datas
    
    # All distinct true labels (set order, so colours may vary between runs).
    unique_labels = list(set(item[1] for item in datas))
    k = len(unique_labels)  # number of label groups
    
    # Numeric index assigned to each label.
    label_to_index = {label: idx for idx, label in enumerate(unique_labels)}
    
    # Per-sample numeric label array.
    y = np.array([label_to_index[item[1]] for item in datas], dtype=int)
    
    # RGB colour ramp: three phase-shifted copies of the sgn() trapezoid wave.
    t = np.arange(0, 0.99, 0.01)
    yan = [sgn(i) for i in t]
    yan1 = demo(yan, 33)  # red channel
    yan2 = demo(yan, 66)  # green channel
    yan3 = yan           # blue channel

    # Fresh figure so nothing from a previous plot leaks in.
    fig = plt.figure()
    ax = fig.gca()
    fig.set_size_inches(10, 6)

    legend_elements = []  # proxy artists for the legend
    for c in range(k):
        m = np.where(y == c)
        cc = (yan1[math.floor(c / (k + 1) * 99)], yan2[math.floor(c / (k + 1) * 99)], yan3[math.floor(c / (k + 1) * 99)])

        for i in m[0]:  # iterate the sample indices of this label
            dfc = datas[i][2][:, :width]  # trajectory points
            # Drop all-zero rows (padding added during loading).
            valid_points = dfc[~np.all(dfc == 0, axis=1)]
            if valid_points.size > 0:
                ax.plot(valid_points[:, 1], valid_points[:, 2], linewidth=0.5, color=cc, alpha=0.8)  # draw the track

        # One legend entry per label.
        legend_elements.append(Line2D([0], [0], color=cc, lw=4, label=f'Label {unique_labels[c]}'))

    # Attach the legend.
    ax.legend(handles=legend_elements)

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    if enable_debug_preview:
        plt.show()
    image_base64_origin = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_origin
   
def xunlian(parameters,labeled_datas,length):
    """Train the siamese GRU autoencoder on every pair of labelled trajectories.

    parameters: dict providing "load_weights_path", "saved_weights_path",
        "saved_model_path" and "train_epochs".
    labeled_datas: list of [path, label, data] triples; only path and label
        are used here (trajectories are re-loaded from disk per pair).
    length: fixed number of points per trajectory (shorter ones are zero-padded).
    Returns the trained keras Model; raises ValueError if fitting fails.
    """
    width=3
    cc=width

    lines= [[item[0], item[1]] for item in labeled_datas]

    min_max=preprocessing.MinMaxScaler()
    # Turn the string labels into numeric codes.
    label_encoder = LabelEncoder()
    labels = [line[1] for line in lines]
    numeric_labels = label_encoder.fit_transform(labels)
    # Write the numeric labels back into lines.
    for i in range(len(lines)):
        lines[i][1] = numeric_labels[i]

    # zz = extra oversampling passes for same-label pairs (0 disables them).
    zz=0
    num=0
    ca=0
    # Count all (i, j<=i) pairs (num) and the same-label pairs among them (ca).
    for i in range(1,len(lines)+1):
        for j in range(1,i+1):
            num=num+1  
            if lines[i-1][1]==lines[j-1][1]:  
                ca=ca+1    
    all_num = num+ca*zz  
    # [left inputs, right inputs, target distances].
    pairs_of_images2 = [np.zeros((all_num,length,cc)),np.zeros((all_num,length,cc)),np.zeros((all_num,1))]
    n=-1              # build one training row per trajectory pair
    for i in range(1,len(lines)+1):
        for j in range(1,i+1):
            n=n+1
            df1=np.load(lines[i-1][0])[0:length,0:width]
            df1=min_max.fit_transform(df1)
            df2=np.load(lines[j-1][0])[0:length,0:width]
            df2=min_max.fit_transform(df2)
            # Zero-pad short trajectories up to the fixed length.
            if len(df1)<length:
                df1=np.array(list(df1)+[[0 for i in range(cc)]]*(length-len(df1)))
            if len(df2)<length:
                df2=np.array(list(df2)+[[0 for i in range(cc)]]*(length-len(df2)))
            pairs_of_images2[0][n, :, :] = df1[0:length]
            pairs_of_images2[1][n, :, :] = df2[0:length]
            # Target distance: 0 for same-label pairs, 1 otherwise.
            if lines[i-1][1]==lines[j-1][1]:  
                pairs_of_images2[2][n]=0
            else:
                pairs_of_images2[2][n]=1#1
                
    # Oversample same-label pairs zz more times (dead code while zz == 0).
    for z in range(zz):
        for i in range(1,len(lines)+1):
            for j in range(1,i+1): 
                if lines[i-1][1]==lines[j-1][1]:  
                    df1=np.load(lines[i-1][0])[0:length,0:width]
                    df1=min_max.fit_transform(df1)
                    df2=np.load(lines[j-1][0])[0:length,0:width]
                    df2=min_max.fit_transform(df2)
                    if len(df1)<length:
                        df1=np.array(list(df1)+[[0 for i in range(cc)]]*(length-len(df1)))
                    if len(df2)<length:
                        df2=np.array(list(df2)+[[0 for i in range(cc)]]*(length-len(df2))) 
                    n=n+1
                    pairs_of_images2[0][n, :, :] = df1[0:length]
                    pairs_of_images2[1][n, :, :] = df2[0:length]
                    pairs_of_images2[2][n]=0
    idx=rd.sample(range(num+ca*zz),num+ca*zz)          # shuffle so a batch is not all one label
    pairs_of_images2[2]=pairs_of_images2[2][idx]
    pairs_of_images2[0]=pairs_of_images2[0][idx]
    pairs_of_images2[1]=pairs_of_images2[1][idx]

    
    input_shape=[length,cc]
    model = siamese(input_shape,length,cc)

    def get_layer_weights_count(model, layer_index):
        # Number of weight arrays held by one layer (debug aid only).
        layer = model.layers[layer_index]
        weights = layer.get_weights()
        weights_count = len(weights)
        return weights_count

    # Debug: report how many weight arrays layer index 2 holds.
    layer_index = 2
    weights_count = get_layer_weights_count(model, layer_index)
    print("第三层的权重数量：", weights_count)
    load_weights_path=parameters["load_weights_path"]
    logging.info('导入模型权重...')
    model.load_weights(load_weights_path,by_name=True)
  
    
    # Post-shuffle 90/10 train/validation split.
    train_ratio = 0.9
    train_num = int(all_num*train_ratio)
    val_num = int(all_num*(1-train_ratio))
    train_pairs_of_images=[np.zeros((train_num,length,cc)) for i in range(2)]
    val_pairs_of_images=[np.zeros((val_num,length,cc)) for i in range(2)]
    Y_train=pairs_of_images2[2][0:train_num]
    train_pairs_of_images[0]=pairs_of_images2[0][0:train_num]
    train_pairs_of_images[1]=pairs_of_images2[1][0:train_num]
    Y_val=pairs_of_images2[2][train_num:all_num]
    val_pairs_of_images[0]=pairs_of_images2[0][train_num:all_num]
    val_pairs_of_images[1]=pairs_of_images2[1][train_num:all_num]    
    Batch_size =256
    Lr =0.01    
    train_epochs=parameters["train_epochs"]
    log_callback = LambdaCallback(on_epoch_end=lambda epoch, logs: logging.info(f"已训练 {epoch + 1}/{train_epochs} 轮"))
    model.compile(loss = "mean_absolute_error",optimizer = Adam(lr=Lr))#,metrics = ["binary_accuracy"]
    model.summary()
    print('Train with batch size {}.'.format(Batch_size))
    # NOTE(review): Batch_size is printed but never passed to fit(), so keras
    # falls back to its default batch size — confirm whether that is intended.
    try:
        history=model.fit(train_pairs_of_images,[train_pairs_of_images[0],train_pairs_of_images[1],Y_train],
                validation_data=(val_pairs_of_images,[val_pairs_of_images[0],val_pairs_of_images[1],Y_val]),
                epochs=train_epochs,callbacks=[CheckCancel(),log_callback]  
                )
    except Exception:
        # NOTE(review): this blanket except re-labels ANY training failure as
        # "too few trajectories" — unrelated errors are masked.
        raise ValueError("分类航迹数过少，请选择更多航迹") 
    # Persist the trained weights.
    logging.info('保存模型权重...')
    saved_weights_path=parameters["saved_weights_path"]
    model.save_weights(saved_weights_path)
    
    # Persist the full model.
    logging.info('保存编码器模型...')
    saved_model_path=parameters["saved_model_path"]
    model.save(saved_model_path)
    def show_train_history(train_history, train, validation,parameters):
        # Plot train/validation curves for one metric and save them to xunlian.png.
        enable_debug_preview=parameters.get("__ENABLE_DEBUG_PREVIEW__",False)
        plt.plot(train_history.history[train])
        plt.plot(train_history.history[validation])
        #plt.title('Train History')
        plt.ylabel(train)
        plt.xlabel('Epoch')
        plt.legend(['test','train'], loc='upper right')
        plt.savefig('xunlian.png',bbox_inches='tight')
        if enable_debug_preview:
            plt.show()
        plt.clf()
        plt.close()
    show_train_history(history, 'loss', 'val_loss',parameters)  # loss-curve plot
    return model
    

def xunlian2(parameters,result_1,model,length):
    """Predict pairwise trajectory distances with the trained model, then cluster.

    parameters: dict providing "load_disjz_path" (a distance matrix on disk
        used only for choosing k) plus plotting options.
    result_1: dict with "selected_datas" = [path, label, data] triples;
        trajectories are re-loaded from their paths.
    model: trained siamese Model from xunlian().
    length: fixed trajectory length (shorter ones are zero-padded).
    Returns (a, disjz, best_k, predict_results): cluster index lists, the
    symmetric predicted-distance matrix, the chosen k and raw predictions.
    """
    width=3
    selected_datas=result_1["selected_datas"]
    lines= [item[0] for item in selected_datas]
    cc=width
    min_max=preprocessing.MinMaxScaler()
       
    # Count the (i, j<=i) pairs.
    num=0
    for i in range(1,len(lines)+1):
        for j in range(1,i+1):
            num=num+1
    pairs_of_images = [np.zeros((num,length,cc)) for i in range(2)]
    n=-1
    # Build the left/right model inputs for every pair.
    for i in range(1,len(lines)+1):
        for j in range(1,i+1):
            df1=np.load(lines[i-1])[0:length,0:cc]
            df2=np.load(lines[j-1])[0:length,0:cc]
            df1=min_max.fit_transform(df1)
            df2=min_max.fit_transform(df2)
            if len(df1)<length:
                df1=np.array(list(df1)+[[0 for i in range(cc)]]*(length-len(df1)))
            if len(df2)<length:
                df2=np.array(list(df2)+[[0 for i in range(cc)]]*(length-len(df2)))
            n=n+1
            pairs_of_images[0][n, :, :] = df1
            pairs_of_images[1][n, :, :] = df2
    model.summary()        
    predict_results=model.predict(pairs_of_images,batch_size=1024)
    # Fill the symmetric distance matrix from the model's third output head.
    disjz=-np.ones([len(lines),len(lines)]) 
    n=-1     
    for i in range(1,len(lines)+1):
        for j in range(1,i+1):
            n=n+1
            disjz[i-1,j-1]=predict_results[2][n]
            disjz[j-1,i-1]=disjz[i-1,j-1]
    

    # NOTE(review): k is chosen from a distance matrix loaded from disk
    # (disjz2), not from the freshly predicted disjz used for clustering
    # below — confirm this split is intended.
    disjz_path=parameters["load_disjz_path"]        
    disjz2=np.loadtxt(disjz_path)   
    k_range = range(3,13)  # candidate K values 3..12

    best_k = find_optimal_k(disjz2, k_range,parameters)
    print("Optimal K value:", best_k)
    logging.info('预测k值为：'+str(best_k))
    k=best_k    
    # DBSCAN-style fallback; unreachable while k_range starts at 3.
    if k==0:  
        # Neighbourhood radius derived from the observed distance scale.
        R=disjz.max()*0.1       #            # neighbourhood radius
        mintr=3              # minimum neighbour count for a core point
        neibor=[]
        # Neighbour lists: all points within R of each point.
        for i in range(len(disjz)):
            linshi=[]
            for j in range(len(disjz)):
                if disjz[i,j]<R:
                    linshi.append(j)
            neibor.append(linshi)
        vistied=[]
        for i in range(len(disjz)):
            vistied.append(0)
        a=[]
        for i in range(len(disjz)):
            if vistied[i]!=1 and len(neibor[i])>mintr:   # pick an unvisited core point
                cu=[]
                for j in neibor[i]:
                    if vistied[j]!=1:
                        cu.append(j)
                        vistied[j]=1
                while True:
                    # NOTE(review): cu2 ALIASES cu, so `cu2==cu` is always True
                    # and the while loop exits after one pass. The inner `for`
                    # does iterate the growing list, so expansion may still
                    # complete — but `cu2 = cu.copy()` was likely intended.
                    cu2=cu
                    for ii in cu:
                        for jj in neibor[ii]:
                            if vistied[jj]!=1:
                                cu.append(jj)
                                vistied[jj]=1
                    if cu2==cu:
                        break
                a.append(cu)
    else:
        k=best_k 
        x=[i for i in range(len(disjz))]              # samples are represented by their indices
        y=np.zeros(len(disjz))
        [a,kc]=julei(k,x,y,disjz)  
    
    
    return a,disjz,best_k,predict_results
 
def data_save(f_dir, f_dir1, a):
    """Copy the files of f_dir into per-cluster subfolders of f_dir1.

    f_dir: source directory whose files were clustered.
    f_dir1: destination root; one subfolder per cluster index is created.
    a: list of clusters, each a list of indices into os.listdir(f_dir).

    NOTE(review): relies on os.listdir order matching the order used when
    the indices in `a` were produced — confirm upstream.
    Cleanup: the obscure `for item in m: continue` tuple-unpacking is
    replaced with direct indexing of the np.where result.
    """
    file_name = os.listdir(f_dir)
    k = len(a)
    # Invert the cluster index lists into a flat label-per-file array.
    y = np.zeros(len(file_name))
    for i in range(len(a)):
        for j in range(len(a[i])):
            y[a[i][j]] = i
    for c in range(k):
        m = np.where(y == c)
        if not os.path.exists(f_dir1 + '//' + str(c)):
            os.mkdir(f_dir1 + '//' + str(c))
        members = m[0]  # np.where returns a 1-tuple; take the index array
        for i in members:
            shutil.copy(f_dir + '//' + file_name[i], f_dir1 + '//' + str(c))


def CancelTrain():
    """Request that training stop by raising the module-level cancel flag.

    Fixes the original, which assigned a LOCAL __CANCEL_WAITHANDLE__ (the
    missing `global` meant the flag polled by CheckCancel never changed,
    so cancellation silently did nothing).
    """
    global __CANCEL_WAITHANDLE__
    __CANCEL_WAITHANDLE__ = True
# 自定义回调函数
class CheckCancel(Callback):
    """Keras callback that halts training once the module cancel flag is set."""

    def on_batch_end(self, batch, logs=None):
        global __CANCEL_WAITHANDLE__
        if not __CANCEL_WAITHANDLE__:
            return
        # Ask keras to stop at the end of the current batch.
        self.model.stop_training = True
        print("Training stopped due to __CANCEL_WAITHANDLE__ being True")
def reset_canceltrain():
    """Clear the module-level cancel flag so a new training run can start.

    Fixes the original, which assigned a LOCAL __CANCEL_WAITHANDLE__ (the
    missing `global` meant the module flag was never actually reset).
    """
    global __CANCEL_WAITHANDLE__
    __CANCEL_WAITHANDLE__ = False

__CANCEL_WAITHANDLE__=False


def load_label_data(parameters, paths):
    """Load labelled .npy trajectories, trimming then zero-padding to a fixed length.

    parameters: dict providing "length" (target point count per trajectory).
    paths: iterable of .npy file paths with Variflight-style names.
    Files whose label cannot be extracted or whose shape is not (N, 3) are
    reported and skipped. Returns a list of [path, label, data] triples.
    """
    length = parameters["length"]
    logging.info('导入标签数据...')
    selected_datas = []
    for path in paths:
        try:
            label = extract_label(path)
            data = np.load(path)  # .npy / .npz file
            data = data[0:length, :]
            if data.ndim != 2 or data.shape[1] != 3:
                raise ValueError(f"Data shape {data.shape} is not valid. Expected a 2D array with 3 features per point.")
            if len(data) < length:
                padding = [[0 for _ in range(3)]] * (length - len(data))
                data = np.array(list(data) + padding)
            selected_datas.append([path, label, data])
        except ValueError as e:
            # Skip unusable files but keep processing the rest.
            print(e)
            continue
    logging.info('导入标签数据成功')

    return selected_datas

def load_data_by_paths(parameters,paths):
    """Load external trajectory files, fix their length, and render the raw plot.

    Parameters
    ----------
    parameters : dict
        Must contain "length": the number of points kept per trajectory.
    paths : list[str]
        Paths to trajectory files; each is expected to be an (N, 3) array.

    Returns
    -------
    dict with "base64ImgStr" (base64 plot of the raw tracks) and
    "selected_datas" (list of [path, label, (length, 3) ndarray]).
    """
    length=parameters["length"]
    logging.info('导入外部数据...')
    selected_datas=[]
    for path in paths:
        try:
            label = extract_label(path)
            data = np.load(path)  # load the .npz or .npy file
            # Validate BEFORE slicing: `data[0:length,:]` on a non-2D array
            # raises IndexError, which would escape the ValueError handler below.
            if data.ndim != 2 or data.shape[1] != 3:
                raise ValueError(f"Data shape {data.shape} is not valid. Expected a 2D array with 3 features per point.")
            data=data[0:length,:]
            if len(data)<length:
                # Zero-pad short trajectories up to the fixed length.
                data=np.vstack([data, np.zeros((length-len(data), 3), dtype=data.dtype)])
            selected_datas.append([path, label, data])
        except ValueError as e:
            print(e)
            continue
    logging.info('导入外部数据成功')
    logging.info('原始航迹图生成...')
    image_base64_origin= origin_data_plot(parameters,selected_datas,3)

    return {
        "base64ImgStr":image_base64_origin,
        "selected_datas":selected_datas,
        }
    

def load_data_by_num(parameters):
    """Load up to `count` sample trajectories per label from the bundled
    internal-data directory, normalise each to `length` points, and render
    the raw-track plot.

    Parameters
    ----------
    parameters : dict
        "length" plus one `label: count` entry per class; the keys "length",
        "__ENABLE_DEBUG_PREVIEW__" and "q" are reserved configuration keys.

    Returns
    -------
    dict with "base64ImgStr" (base64 plot) and "selected_datas"
    (list of [file_path, label, (length, 3) ndarray]).

    Raises
    ------
    ValueError
        If more than 50 files would be loaded for a label, or a file is not
        a 2-D (N, 3) array.
    """
    logging.info('导入内部数据...')
    current_dir = os.path.dirname(os.path.abspath(__file__))
    data_dir = os.path.join(current_dir, '内部数据')

    selected_datas = []
    label_counts = defaultdict(int)
    length = parameters["length"]

    for label, count in parameters.items():
        # Configuration keys share the dict with the per-label counts.
        if label in ["length", "__ENABLE_DEBUG_PREVIEW__", "q"]:
            continue
        label_dir = os.path.join(data_dir, label)
        if not os.path.exists(label_dir):
            logging.warning(f"Label directory {label_dir} does not exist")
            continue

        files = os.listdir(label_dir)
        np.random.shuffle(files)  # randomise the file order
        count = min(count, len(files))

        # NOTE(review): `count` was already clamped to the number of available
        # files, so this only fires when >50 files are both requested AND
        # present; an oversized request against a small directory passes
        # silently. Confirm whether the check should precede the clamp.
        if count > 50:
            raise ValueError(f"Exceeded maximum count for label {label}: {count}")

        for file in files[:count]:
            file_path = os.path.join(label_dir, file)
            data = np.load(file_path)  # data is stored as .npy
            # Check ndim too: `shape[1]` on a 1-D array would raise IndexError
            # instead of the intended ValueError.
            if data.ndim != 2 or data.shape[1] != 3:
                raise ValueError(f"Data shape {data.shape} is not valid. Expected (N, 3).")
            data = data[:length, :]  # keep only the first `length` points
            if len(data)<length:
                # Zero-pad short trajectories up to the fixed length.
                data=np.vstack([data, np.zeros((length-len(data), 3), dtype=data.dtype)])
            selected_datas.append([file_path, label, data])
            label_counts[label] += 1
    logging.info('导入内部数据成功')
    logging.info('原始航迹图生成...')
    image_base64_origin= origin_data_plot(parameters,selected_datas,3)
    return {
        "base64ImgStr":image_base64_origin,
        "selected_datas":selected_datas,

    }
        
def main(parameters0,parameters,result_1):
    """Run the semi-supervised trajectory-clustering pipeline end to end.

    Loads the labelled data, trains the model (xunlian), clusters the input
    trajectories (xunlian2), plots and saves the cluster results, and
    computes three internal clustering metrics (silhouette, DBI, CHI).

    Parameters:
        parameters0: data parameters; only "length" is read here.
        parameters: algorithm parameters (label-data path, model/weight
            paths, training epochs/batch size, output path, ...).
        result_1: output of load_data_by_paths / load_data_by_num; must
            contain "selected_datas".

    Returns:
        dict with the cluster plot (base64), the cluster/track assignment
        list, the three internal metrics, the elapsed time in seconds and
        the chosen number of clusters.
    """
    length=parameters0["length"]
    plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei font so CJK plot labels render
    plt.rcParams['axes.unicode_minus'] = False    # render minus signs correctly with a CJK font
    
    # Main loop (currently a single iteration; the metric lists would support averaging)
    loop_num = 1
    silhouette_list = []
    dbi_list = []
    chi_list = []
    out_path=parameters["out_path"]
    
    for _ in range(loop_num):
        reset_canceltrain()
        logging.info('获取标签数据...')
        # labeled_rate=parameters["labeled_rate"]
        # labeled_lost=parameters["labeled_lost"]
        # selected_datas=result_1["selected_datas"]
        load_label_path=parameters["load_label_path"]
        label_paths=[]
        for alphabet in os.listdir(load_label_path):
            alphabet_path=os.path.join(load_label_path,alphabet)
            label_paths.append(alphabet_path)
        labeled_datas=load_label_data(parameters0,label_paths)
        # Time the algorithm run
        logging.info('开始训练...')
        start_time = time.time()  # start timing
        model=xunlian(parameters,labeled_datas,length)
        logging.info('开始分类...')
        a,X,best_k,predict_results= xunlian2(parameters,result_1,model,length)
        end_time = time.time()  # stop timing
        elapsed_time = (end_time - start_time) # elapsed wall-clock seconds
        # Split the elapsed time into minutes and seconds for the log line
        minutes = int(elapsed_time // 60)
        seconds = int(elapsed_time % 60)
        # Log the timing
        logging.info(f"分类完成。总耗时: {minutes} 分钟 {seconds} 秒")
        # class_accuracies_list =[]
        # class_purity_list = []
        logging.info('生成分类航迹图...')
        image_base64_julei=data_plot(parameters,result_1,a,3)

        julei_datas=data_julei(result_1,a)
        # Extract features and labels
        # (flatten each sample's features directly)
        # features = predict_results.reshape(len(predict_results), -1)
        # print(predict_results[2])
        # features = predict_results[2]
        # print(predict_results)
        # labels = np.array([item[1] for item in julei_datas])
        #         # ensure label count matches feature count
        # assert len(features) == len(labels), "特征数量与标签数量不一致"
        # origin_datas = result_1["selected_datas"]

        # origin_datas = result_1["selected_datas"]
        
        # Extract true labels and cluster labels
        #true_labels = [item[1] for item in origin_datas]
        #cluster_labels = [item[1] for item in julei_datas]
        # true_labels = np.array([item[1] for item in origin_datas])
        # cluster_labels = np.array([item[1] for item in julei_datas])
        # (a clustering step producing new cluster labels would go here)
        #new_cluster_labels = cluster_labels  # assumed unchanged; may differ in practice
        logging.info('保存结果...')
        save_clusters_to_files(out_path,julei_datas)
        # Map cluster labels onto true labels
        # mapped_labels, cluster_to_label = map_clusters_to_labels(true_labels, new_cluster_labels)

        # logging.info(f"标签映射: {cluster_to_label}")
        logging.info('计算轮廓系数、DBI和CHI...')
        # Compute the three internal clustering metrics
        silhouette = calculate_silhouette_coefficient(julei_datas)
        dbi = calculate_davies_bouldin_index(julei_datas)
        chi = calculate_calinski_harabasz_index(julei_datas)

        # Collect the per-iteration results
        silhouette_list.append(silhouette)
        dbi_list.append(dbi)
        chi_list.append(chi)


    logging.info('生成指标...')
    # NOTE(review): this clobbers the caller-supplied `parameters` dict and the
    # new value is never read afterwards -- looks like leftover debug code; confirm.
    parameters = {"__ENABLE_DEBUG_PREVIEW__": True}
    # image_base64_index = plot_dual_bars(average_class_purity, average_class_accuracies, true_labels, parameters, title='聚类性能')
    # for label in average_class_accuracies.keys():
    #     logging.info(f"{label}: 准确率: {average_class_accuracies[label]:.4f}, 纯度: {average_class_purity[label]:.4f}")

    # NOTE(review): the values logged/returned below come from the LAST loop
    # iteration only; the *_list accumulators are filled but never aggregated.
    logging.info(f"轮廓系数: {silhouette:.4f}, DBI: {dbi:.4f}, 方差比准则CHI: {chi:.4f}")
    # Print the metrics
    print("轮廓系数:",silhouette)
    print("DBI:", dbi)
    print("方差比准则CHI:", chi)
        # (chart plotting for a combined figure would go here)

    

    return {
        "base64Imgcluster":image_base64_julei,              # multi-track cluster plot (base64)
        "cluster_traj_data":julei_datas ,                   # list[path, group name, track points] assignments
        #"base64ImgIdx":image_base64_index ,                 # purity/accuracy chart
        'silhouette': silhouette,                          # silhouette coefficient
        'DBI': dbi,                                        # Davies-Bouldin index
        'CHI': chi,                                        # Calinski-Harabasz index
        'time': elapsed_time,                               # algorithm runtime (seconds)
        'best_k':best_k,                                    # number of clusters
    }



if __name__=="__main__":
    # Demo / manual entry point: load preprocessed tracks and run the pipeline
    # with hard-coded local paths and parameters.
    path= r"C:\Users\22377\Desktop\聚类\预处理"
    paths=[]
    cc=3
    for alphabet in os.listdir(path):
        alphabet_path=os.path.join(path,alphabet)
        paths.append(alphabet_path)


    parameters_1= {
        "length":300,     # data length (points per track), at most 400
        "__ENABLE_DEBUG_PREVIEW__":True,                            # allow internal debug output (charts, UI, ...; internal use only)        
        # per-class sample counts
        "CCA1919" : 20,          # at most 50
        "CES2360": 20,            # at most 50
        "CES2551": 20,            # at most 50
        "CHH7303": 20,            # at most 50
        "CHH7426": 20,       # at most 50
        "CHH7695": 20,        # at most 50
        "CSN3371": 20,           # at most 50
        "CSN6568": 20,           # at most 50
        "CSN6761": 20,        # at most 50
    }

    
    parameters_2 = {

        "__ENABLE_DEBUG_PREVIEW__":True, 
        # "labeled_rate":0.1,                                                                   # labelling rate, 0<=n<=1, step 0.1
        # "labeled_lost":1,      # new    0>x<4                                                         # number of missing classes, 0-9 inclusive  
        "load_label_path":r"D:\temp\xinda\trackCluster\标签数据",
        "load_weights_path":r'C:\Users\22377\Desktop\wujianduzibianmaqi_weights.h5',        # weights load path
        "load_model_path":r'C:\Users\22377\Desktop\wujianduzibianmaqi_model.h5',         # model load path
        "saved_weights_path":r'C:\Users\22377\Desktop\banjianduzibianmaqi_weights.h5',        # weights save path
        "saved_model_path":r'C:\Users\22377\Desktop\banjianduzibianmaqi_model.h5',         # model save path
        "train_epochs":80,                                                                # training epochs
        "train_batch_size":32,                                                             # samples per batch
        "load_disjz_path":r'C:\Users\22377\Desktop\wjd_disjz.txt',                                  # distance-matrix save path
        "out_path":r'C:\Users\22377\Desktop\半监督聚类结果'                                 # clustering-result output path
    }
    
    
    result_0=load_data_by_paths(parameters_1,paths)# external data
    #result_1=load_data_by_num(parameters_1)# internal data
    #  result_1:{
    #     "base64ImgStr":image_base64_traj, #xxxxx
    #     "traj_data":traj_data
    # }


    result_2=main(parameters_1,parameters_2,result_0)
     #  reslt_1:{
    #     "base64ImgStr":image_base64_traj, #xxxxx
    #     "traj_data":traj_data
    #"cluster_traj_data":julei_datas ,                  #	list[string,string,list[[double,double]]] cluster_traj_data: path / group name / track assignment list
    #    "base64ImgPca":image_base64_index               # metrics chart
    # }