import os,shutil
from warnings import simplefilter
simplefilter(action='ignore',category=FutureWarning)
import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot#,savefig
import time
import keras
from keras.models import Model#,Sequential
from keras.optimizers import Adam
from keras.layers import Dense,Dropout,BatchNormalization,GRU,Lambda
from sklearn import preprocessing
import random as rd
import tensorflow as tf
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, adjusted_rand_score
import numpy as np
from matplotlib.lines import Line2D
from keras.callbacks import Callback, LearningRateScheduler
# Limit TensorFlow to 4 GB on the first GPU so other processes can share it.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
  try:
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*4)])  # cap GPU memory at 4 GB
    logical_gpus = tf.config.experimental.list_logical_devices('GPU')
    print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
  except RuntimeError as e:
    # Virtual devices must be configured before the GPUs are initialized.
    print(e)
tf.keras.backend.clear_session()  # start from a clean Keras session
import keras.backend as K
from keras.layers import Input
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
from numpy.linalg import eig
import io
import base64
import re
from sklearn.preprocessing import LabelEncoder
def initcenter(x, k):
    """Pick k distinct random sample indices to use as initial cluster centers.

    Parameters
    ----------
    x : sequence of samples (only its length is used).
    k : number of centers; must be <= len(x).

    Returns
    -------
    list[int] of k unique indices into x.
    """
    # Seed from the fractional part of the current time so repeated calls
    # within a single run draw different centers.
    frac, _ = math.modf(time.time())
    np.random.seed(int(frac * 1000))
    centers = []
    while len(centers) < k:
        # BUG FIX: the original drew from len(x) - 1, so the last sample
        # could never become a center (and k == len(x) looped forever).
        d = np.random.choice(len(x))
        if d not in centers:          # duplicate centers are not allowed
            centers.append(d)
    return centers
def find_optimal_k(data, k_range, parameters):
    """Run KMeans for every k in k_range, plot the silhouette scores, and
    return the k with the highest score.

    NOTE(review): the score uses metric='precomputed', so `data` is expected
    to be a pairwise distance matrix — yet KMeans also fits on it as if it
    were a feature matrix; confirm that is intended.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    best_score = -1
    best_k = None
    scores = []
    for k in k_range:
        labels = KMeans(n_clusters=k, random_state=42).fit_predict(data)
        score = silhouette_score(data, labels, metric='precomputed')
        scores.append(score)
        if score > best_score:
            best_score = score
            best_k = k

    plt.plot(k_range, scores, 'bx-')
    plt.xlabel('Number of Clusters (K)')
    plt.ylabel('Silhouette Score')
    plt.title('Silhouette Score for Each K')
    if enable_debug_preview:
        plt.show()
    return best_k
def nearest(kc, i, k, disjz):
    """Return the index (0..k-1) of the center in kc closest to sample i.

    kc    : list/array of k center sample-indices.
    i     : index of the sample being classified.
    k     : number of clusters.
    disjz : precomputed pairwise distance matrix.
    """
    dists = [disjz[kc[c], i] for c in range(k)]
    # argmin picks the first minimum, matching the original np.where form.
    return np.argmin(dists)

def xclassify(x, y, kc, k, disjz):
    """Assign every sample in x to its nearest center.

    y is updated in place (and also returned) with the cluster id of each
    sample.
    """
    for idx, sample in enumerate(x):
        y[idx] = nearest(kc, sample, k, disjz)
    return y

def newcenten(m, ycen, disjz):
    """Return the medoid of one cluster: the member with the smallest summed
    distance to all other members.

    m     : tuple returned by np.where(y == c); m[0] holds the member indices.
    ycen  : current center index, returned unchanged when the cluster is empty.
    disjz : pairwise distance matrix.
    """
    cen = ycen  # fallback: a cluster may be empty or contain only its center
    # Unpack np.where's 1-tuple directly (the original used a do-nothing
    # `for item in m: continue` loop, leaving `item` unbound on empty input).
    members = m[0]
    best = float('inf')
    for i in members:
        total = 0
        for n in members:
            total = total + disjz[i, n]
        if best > total:
            best = total
            cen = i
    return cen

def kcmean (x,y,kc,k,disjz):
    """Recompute every cluster's medoid center.

    Returns (new_centers_array, changed) where changed is True when at
    least one center moved.
    """
    centers = list(kc)
    changed = False
    for c in range(k):
        members = np.where(y == c)
        candidate = newcenten(members, centers[c], disjz)
        if centers[c] != candidate:
            centers[c] = candidate
            changed = True  # a cluster center moved this sweep
    return (np.array(centers), changed)
  
def deifen(y, disjz):
    """Mean silhouette coefficient of a labeling.

    y     : cluster label per sample.
    disjz : pairwise distance matrix.

    For each sample i: a = mean distance to its own cluster (self included),
    b = smallest mean distance to any other cluster; score = (b-a)/max(a,b).
    A sample whose own-cluster mean distance is 0 scores 0.  Returns the
    mean score over all samples.
    """
    s = []
    for i in range(len(disjz)):
        a = 0
        b = []
        for label in set(y):
            # Members of this cluster; np.where returns a 1-tuple (the
            # original unpacked it with a do-nothing loop).
            item = np.where(y == label)[0]
            distance = 0                 # summed distance from i to the cluster
            for j in item:
                distance = distance + disjz[i, j]
            distance = distance / len(item)
            if (item == i).any():
                a = distance             # own cluster
            else:
                b.append(distance)       # other clusters
        if b == []:
            # Only one cluster exists; min(b) below will fail — keep the
            # debug print so the failure is diagnosable.
            print(y)
        b = min(b)
        z = [a, b]
        if a == 0:
            s.append(0)                  # singleton cluster: silhouette is 0
        else:
            s.append((b - a) / max(z))
    s = np.mean(s)
    return s

def julei(k,x,yi,disjz):             # k = cluster count, x = sample indices, yi = initial label array
    """K-medoids clustering over a precomputed distance matrix.

    Runs the assign/recenter loop from 10 random initializations and keeps
    the labeling with the best silhouette score (deifen).

    Parameters
    ----------
    k     : number of clusters.
    x     : list of sample indices (0..n-1).
    yi    : label array of length n; NOTE: mutated in place by xclassify.
    disjz : (n, n) pairwise distance matrix.

    Returns
    -------
    (b, kcr): b is a list of k lists of member indices, kcr the center
    indices of the best run.
    """
    a=-float('inf')
    for i in range(10):                           # 10 random restarts; keep the best one
        kc=initcenter(x,k)
        y=yi
        flag=True
        count=1
        while flag:
            count=1+count
            y = xclassify(x,y,kc,k,disjz)                     # y holds each sample's cluster id
            kc,flag = kcmean(x,y,kc,k,disjz)                 # stop when centers stabilize, or after 10 sweeps below
            print(flag)
            if count>10:
                flag=False
                print('z')
        if deifen(y,disjz)>a:
            yrr=y.copy()
            a=deifen(y,disjz)
            kcr=kc
    b=[]
    # Regroup the best labeling into one member-index list per cluster.
    for j in range(k):
        a=[]
        for i in range(len(disjz)):
            if (yrr[i])==j:
                a.append(i)
        b.append(a)
    return b,kcr
def demo(lst, k):
    """Rotate lst left by k positions (the first k elements wrap to the end)."""
    head, tail = lst[:k], lst[k:]
    return tail + head
def sgn(x):
    """Piecewise ramp on [0, 1] used to build an RGB color spectrum.

    0 on [0, 1/3); a rising ramp 6x-2 on [1/3, 1/2); 1 on [1/2, 5/6);
    a falling ramp -6x+6 on [5/6, 1].  Outside [0, 1] returns None.
    """
    if 0 <= x < 1/3:
        return 0
    if 1/2 <= x < 5/6:
        return 1
    if 2/6 <= x < 3/6:
        return 6 * x - 2
    if 5/6 <= x <= 1:
        return -6 * x + 6
    return None
def guiyi(disjz):
    """Min-max normalize an entire matrix to [0, 1] as one flat population,
    preserving its shape."""
    rows, cols = disjz.shape
    flat = disjz.reshape([rows * cols, 1])
    scaler = preprocessing.MinMaxScaler()
    flat = scaler.fit_transform(flat)
    return flat.reshape([rows, cols])
def pca(X, k):
    """Project X onto its top-k principal components.

    X : (n_samples, n_features) array.
    k : number of components to keep.

    Returns the (n_samples, k) projection.
    """
    X = X - X.mean(axis=0)           # center each feature
    X_cov = np.cov(X.T, ddof=0)      # feature covariance matrix
    eigenvalues, eigenvectors = eig(X_cov)
    # Indices of the k largest eigenvalues, in descending order.
    klarge_index = eigenvalues.argsort()[-k:][::-1]
    # BUG FIX: np.linalg.eig returns eigenvectors as COLUMNS; the original
    # indexed rows (eigenvectors[klarge_index]) and projected onto the
    # wrong axes.
    k_eigenvectors = eigenvectors[:, klarge_index]
    return np.dot(X, k_eigenvectors)

class lstm_model:
    """GRU encoder: maps a (length, cc) trajectory to a 20-dim embedding."""
    def __init__(self,length,cc):
        # Two stacked GRUs: 40 units (sequence output) feeding 20 units
        # (final vector output).
        self.block1=GRU(40, return_sequences=True,input_shape=[length,cc],name='c1', recurrent_activation='sigmoid')
        self.block2=GRU(20,name='d1', recurrent_activation='sigmoid')
    def call(self, inputs):
        # Return the 20-dim encoding of the input sequence.
        x = inputs
        x = self.block1(x)
        outputs = self.block2(x)
        return outputs
class delstm_model:
    """GRU decoder: expands a 20-dim embedding back into a (length, cc)
    trajectory reconstruction."""
    def __init__(self,length,cc):
        # Mirror of the encoder: repeat the embedding `length` times, run
        # two GRUs (20 -> 40 units), then project each timestep to cc dims.
        self.block0=RepeatVector(length)
        self.block1=GRU(20,return_sequences=True,name='e1', recurrent_activation='sigmoid')
        self.block2=GRU(40,return_sequences=True,name='f1', recurrent_activation='sigmoid')
        self.block3=TimeDistributed(Dense(cc,name='g1'))

    def call(self, inputs):
        # Return the reconstructed (length, cc) sequence.
        x = inputs
        x = self.block0(x)
        x = self.block1(x)
        x = self.block2(x)
        outputs = self.block3(x)
        return outputs
#-------------------------#
#   Build the siamese network
#-------------------------#
def siamese(input_shape,length,cc):
    """Siamese GRU autoencoder.

    Two inputs share one encoder (lstm_model) and one decoder (delstm_model).
    Outputs: [reconstruction of input 1, reconstruction of input 2, the mean
    absolute difference of the two encodings — a scalar pair distance].
    """
    g_model = lstm_model(length,cc)
    g_model2 = delstm_model(length,cc)

    input_1 = Input(shape=input_shape)
    input_2 = Input(shape=input_shape)
    # Shared-weight encoding of both trajectories.
    encoded_image_1 = g_model.call(input_1)
    encoded_image_2 = g_model.call(input_2)
    outt1=g_model2.call(encoded_image_1)
    outt2=g_model2.call(encoded_image_2)
    # Element-wise |e1 - e2| as the raw pair difference.
    out = Lambda(lambda tensors:K.abs(tensors[0] - tensors[1]))([encoded_image_1, encoded_image_2])
    out = BatchNormalization()(out)
    out = Dropout(0.1)(out)


    # Collapse the difference vector to a single mean value per pair.
    outt= Lambda(lambda tensors: tf.reduce_mean(tensors,axis=1,keepdims = True))(out)
    model = Model([input_1, input_2], [outt1,outt2,outt])
    return model
def extract_label(path):
    """Pull the flight label out of a 'Variflight_<label>_YYYYMMDD.xls<n>.npy'
    file path.

    Raises ValueError when the path does not match the expected pattern.
    """
    match = re.search(r'Variflight_(\w+)_\d{8}\.xls\d+\.npy$', path)
    if not match:
        raise ValueError(f"无法从路径 {path} 中提取标签")
    return match.group(1)
def load_data(parameters):
    """Load every .npy trajectory listed in parameters["paths"].

    Returns a list of [path, label, data] triples.  Files whose label cannot
    be parsed, or whose array is not 2-D with 3 columns, are reported and
    skipped.
    """
    result = []
    for path in parameters["paths"]:
        try:
            label = extract_label(path)
            data = np.load(path)  # .npy/.npz trajectory file
            if data.ndim != 2 or data.shape[1] != 3:
                raise ValueError(f"Data shape {data.shape} is not valid. Expected a 2D array with 3 features per point.")
        except ValueError as e:
            print(e)
            continue
        result.append([path, label, data])
    return result
def load_label_data(parameters):
    """Load every labeled .npy trajectory listed in
    parameters["labeldata_paths"].

    The validation is identical to load_data (this function used to be a
    copy-pasted duplicate of it); delegate with the label-data paths
    substituted in.
    """
    return load_data({"paths": parameters["labeldata_paths"]})
def xunlian(parameters):
    """Train the siamese autoencoder on pairs of labeled trajectories.

    Builds every unordered pair (i, j) of labeled files, sets the pair target
    to 0 when the two share a class and 1 otherwise, then trains the siamese
    model to reconstruct both inputs and predict the pair distance.

    Returns the trained Keras model.
    """
    length = 300   # points kept per trajectory
    width = 3      # features per point
    cc = width
    labeled_datas = load_label_data(parameters)
    print("&&&&&&&&&&&&&&&&&&&&&&")
    print(labeled_datas)
    lines = [[item[0], item[1]] for item in labeled_datas]

    min_max = preprocessing.MinMaxScaler()
    # Encode the string labels as integers.
    label_encoder = LabelEncoder()
    labels = [line[1] for line in lines]
    numeric_labels = label_encoder.fit_transform(labels)
    for i in range(len(lines)):
        lines[i][1] = numeric_labels[i]

    zz = 0   # extra copies of same-label pairs (0 = oversampling disabled)
    num = 0  # number of unordered pairs
    ca = 0   # number of same-label pairs
    for i in range(1, len(lines) + 1):
        for j in range(1, i + 1):
            num = num + 1
            if lines[i - 1][1] == lines[j - 1][1]:
                ca = ca + 1
    all_num = num + ca * zz
    pairs_of_images2 = [np.zeros((all_num, length, cc)), np.zeros((all_num, length, cc)), np.zeros((all_num, 1))]
    n = -1
    for i in range(1, len(lines) + 1):
        for j in range(1, i + 1):
            n = n + 1
            df1 = np.load(lines[i - 1][0])[0:length, 0:width]
            df1 = min_max.fit_transform(df1)
            df2 = np.load(lines[j - 1][0])[0:length, 0:width]
            df2 = min_max.fit_transform(df2)
            # Zero-pad short trajectories up to `length` points.
            if len(df1) < length:
                df1 = np.array(list(df1) + [[0 for i in range(cc)]] * (length - len(df1)))
            if len(df2) < length:
                df2 = np.array(list(df2) + [[0 for i in range(cc)]] * (length - len(df2)))
            pairs_of_images2[0][n, :, :] = df1[0:length]
            pairs_of_images2[1][n, :, :] = df2[0:length]
            # Target distance: 0 for same-label pairs, 1 otherwise.
            if lines[i - 1][1] == lines[j - 1][1]:
                pairs_of_images2[2][n] = 0
            else:
                pairs_of_images2[2][n] = 1

    # Optional oversampling of same-label pairs (zz copies each).
    for z in range(zz):
        for i in range(1, len(lines) + 1):
            for j in range(1, i + 1):
                if lines[i - 1][1] == lines[j - 1][1]:
                    df1 = np.load(lines[i - 1][0])[0:length, 0:width]
                    df1 = min_max.fit_transform(df1)
                    df2 = np.load(lines[j - 1][0])[0:length, 0:width]
                    df2 = min_max.fit_transform(df2)
                    if len(df1) < length:
                        df1 = np.array(list(df1) + [[0 for i in range(cc)]] * (length - len(df1)))
                    if len(df2) < length:
                        df2 = np.array(list(df2) + [[0 for i in range(cc)]] * (length - len(df2)))
                    n = n + 1
                    pairs_of_images2[0][n, :, :] = df1[0:length]
                    pairs_of_images2[1][n, :, :] = df2[0:length]
                    pairs_of_images2[2][n] = 0
    # Shuffle so a batch is not dominated by a single label.
    idx = rd.sample(range(num + ca * zz), num + ca * zz)
    pairs_of_images2[2] = pairs_of_images2[2][idx]
    pairs_of_images2[0] = pairs_of_images2[0][idx]
    pairs_of_images2[1] = pairs_of_images2[1][idx]

    input_shape = [length, cc]
    model = siamese(input_shape, length, cc)

    def get_layer_weights_count(model, layer_index):
        # Debug aid: number of weight arrays held by one layer.
        layer = model.layers[layer_index]
        weights = layer.get_weights()
        return len(weights)

    layer_index = 2  # inspect the third layer (index from 0)
    weights_count = get_layer_weights_count(model, layer_index)
    print("第三层的权重数量：", weights_count)
    load_weights_path = parameters["load_weights_path"]
    model.load_weights(load_weights_path, by_name=True)

    # 90/10 train/validation split (the data were already shuffled above).
    train_ratio = 0.9
    train_num = int(all_num * train_ratio)
    val_num = int(all_num * (1 - train_ratio))
    train_pairs_of_images = [np.zeros((train_num, length, cc)) for i in range(2)]
    val_pairs_of_images = [np.zeros((val_num, length, cc)) for i in range(2)]
    Y_train = pairs_of_images2[2][0:train_num]
    train_pairs_of_images[0] = pairs_of_images2[0][0:train_num]
    train_pairs_of_images[1] = pairs_of_images2[1][0:train_num]
    Y_val = pairs_of_images2[2][train_num:all_num]
    val_pairs_of_images[0] = pairs_of_images2[0][train_num:all_num]
    val_pairs_of_images[1] = pairs_of_images2[1][train_num:all_num]
    Batch_size = 256
    Lr = parameters["Lr"]
    train_epochs = parameters["train_epochs"]
    model.compile(loss = "mean_absolute_error", optimizer = Adam(lr=Lr))
    model.summary()
    print('Train with batch size {}.'.format(Batch_size))
    # BUG FIX: Batch_size was announced above but never passed to fit(), so
    # Keras silently trained with its default batch size of 32.
    history = model.fit(train_pairs_of_images, [train_pairs_of_images[0], train_pairs_of_images[1], Y_train],
            validation_data=(val_pairs_of_images, [val_pairs_of_images[0], val_pairs_of_images[1], Y_val]),
            batch_size=Batch_size,
            epochs=train_epochs, callbacks=[CheckCancel()]
            )

    def show_train_history(train_history, train, validation, parameters):
        # Plot the train/validation curves and save them to xunlian.png.
        enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
        plt.plot(train_history.history[train])
        plt.plot(train_history.history[validation])
        plt.ylabel(train)
        plt.xlabel('Epoch')
        plt.legend(['test','train'], loc='upper right')
        plt.savefig('xunlian.png',bbox_inches='tight')
        if enable_debug_preview:
            plt.show()
        plt.clf()
        plt.close()
    show_train_history(history, 'loss', 'val_loss', parameters)  # loss curves
    return model
    

def xunlian2(parameters,model):
    """Build the pairwise distance matrix with the trained siamese model and
    cluster the trajectories.

    parameters["k"] == 0 selects DBSCAN-style density clustering on the
    predicted distances; any other value runs k-medoids (julei) with k taken
    from the silhouette search over the matrix loaded from load_disjz_path.

    Returns (a, disjz): clusters as lists of sample indices, and the
    predicted distance matrix.
    """
    paths=parameters["paths"]
    length=300
    width=3
    k=parameters["k"]
    lines=paths
    cc=width
    min_max=preprocessing.MinMaxScaler()

    # Number of unordered pairs (i, j) with j <= i.
    num=0
    for i in range(1,len(lines)+1):
        for j in range(1,i+1):
            num=num+1
    pairs_of_images = [np.zeros((num,length,cc)) for i in range(2)]
    n=-1
    for i in range(1,len(lines)+1):
        for j in range(1,i+1):
            df1=np.load(lines[i-1])[0:length,0:cc]
            df2=np.load(lines[j-1])[0:length,0:cc]
            df1=min_max.fit_transform(df1)
            df2=min_max.fit_transform(df2)
            # Zero-pad short trajectories up to `length` points.
            if len(df1)<length:
                df1=np.array(list(df1)+[[0 for i in range(cc)]]*(length-len(df1)))
            if len(df2)<length:
                df2=np.array(list(df2)+[[0 for i in range(cc)]]*(length-len(df2)))
            n=n+1
            pairs_of_images[0][n, :, :] = df1
            pairs_of_images[1][n, :, :] = df2
    model.summary()
    predict_results=model.predict(pairs_of_images,batch_size=1024)

    # Fill the symmetric distance matrix from the pair predictions
    # (predict_results[2] is the scalar pair-distance output head).
    disjz=-np.ones([len(lines),len(lines)])
    n=-1
    for i in range(1,len(lines)+1):
        for j in range(1,i+1):
            n=n+1
            disjz[i-1,j-1]=predict_results[2][n]
            disjz[j-1,i-1]=disjz[i-1,j-1]

    disjz_path=parameters["load_disjz_path"]
    disjz2=np.loadtxt(disjz_path)
    k_range = range(3,13)  # candidate cluster counts for the silhouette search

    best_k = find_optimal_k(disjz2, k_range,parameters)
    print("Optimal K value:", best_k)

    if k==0:
        # DBSCAN-style density clustering on the predicted distances.
        R=disjz.max()*0.1    # neighborhood radius
        mintr=3              # minimum neighbor count for a core point
        neibor=[]            # neighbor lists within radius R
        for i in range(len(disjz)):
            linshi=[]
            for j in range(len(disjz)):
                if disjz[i,j]<R:
                    linshi.append(j)
            neibor.append(linshi)
        vistied=[]
        for i in range(len(disjz)):
            vistied.append(0)
        a=[]
        for i in range(len(disjz)):
            if vistied[i]!=1 and len(neibor[i])>mintr:   # unvisited core point
                cu=[]
                for j in neibor[i]:
                    if vistied[j]!=1:
                        cu.append(j)
                        vistied[j]=1
                while True:
                    # BUG FIX: the original wrote `cu2=cu`, aliasing the same
                    # list, so the convergence test `cu2==cu` was always true;
                    # snapshot a copy so the cluster expands until stable.
                    cu2=list(cu)
                    for ii in cu:
                        for jj in neibor[ii]:
                            if vistied[jj]!=1:
                                cu.append(jj)
                                vistied[jj]=1
                    if cu2==cu:
                        break
                a.append(cu)
    else:
        k=best_k   # override the requested k with the silhouette optimum
        x=[i for i in range(len(disjz))]   # samples are referenced by index
        y=np.zeros(len(disjz))
        [a,kc]=julei(k,x,y,disjz)

    return a,disjz
def data_biaoqian(parameters, a):
    """Attach cluster ids to the raw trajectories.

    a : list of clusters, each a list of sample indices into
        parameters['paths'].

    Returns a list of [path, cluster_id, data] entries grouped by cluster.
    """
    paths = parameters['paths']
    L = 300      # points kept per trajectory
    width = 3    # features per point
    datas = []
    k = len(a)
    # Invert the cluster lists into a per-sample label array.
    y = np.zeros(len(paths))
    for i in range(len(a)):
        for j in range(len(a[i])):
            y[a[i][j]] = i

    for c in range(k):
        # np.where returns a 1-tuple; take its index array directly (the
        # original unpacked it with a do-nothing loop).
        item = np.where(y == c)[0]
        for i in item:
            df = np.load(paths[i])[0:L, 0:width]
            datas.append([paths[i], c, df])
    return datas
def data_plot(parameters, a, L, width):
    """Plot every trajectory colored by its cluster; return the figure as a
    base64-encoded PNG string.

    Parameters
    ----------
    a     : list of clusters (lists of indices into parameters["paths"]).
    L     : number of points kept per trajectory.
    width : number of features per point (lat/lon are columns 1 and 2).
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    k = len(a)  # number of clusters
    paths = parameters["paths"]
    y = np.zeros(len(paths), dtype=int)

    # Invert the cluster lists into a per-sample label array.
    for i in range(len(a)):
        for j in range(len(a[i])):
            y[a[i][j]] = i
    # Build three phase-shifted ramps (length 99) as an RGB color spectrum.
    t = np.arange(0, 0.99, 0.01)
    yan = []
    for i in t:
        y_1 = sgn(i)
        yan.append(y_1)
    yan1 = demo(yan, 33)  # red channel
    yan2 = demo(yan, 66)  # green channel
    yan3 = yan           # blue channel

    fig = plt.figure()
    ax = fig.gca()
    fig.set_size_inches(10, 6)

    legend_elements = []  # one legend entry per cluster
    for c in range(k):
        m = np.where(y == c)
        cc = (yan1[math.floor(c / (k + 1) * 99)], yan2[math.floor(c / (k + 1) * 99)], yan3[math.floor(c / (k + 1) * 99)])

        for i in m[0]:  # iterate the member indices directly
            dfc = np.load(paths[i])[0:L, 0:width]
            ax.plot(dfc[:, 1], dfc[:, 2], linewidth=0.5, color=cc, alpha=0.8)

        # Proxy artist for this cluster's legend entry.
        legend_elements.append(Line2D([0], [0], color=cc, lw=4, label=f'Cluster {c}'))

    ax.legend(handles=legend_elements)

    # Encode the figure as a base64 PNG (saved before show so it isn't blank).
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    if enable_debug_preview:
        plt.show()
    image_base64_julei = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_julei
   
    
def pca_plot(parameters, X, a):
    """Scatter-plot the 2-D PCA projection of X, colored by cluster.

    X : matrix to project (one row per trajectory).
    a : list of clusters (lists of indices into parameters["paths"]).

    Returns the plot as a base64-encoded PNG string.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    k = len(a)
    paths = parameters["paths"]
    # Invert the cluster lists into a per-sample label array.
    y = np.zeros(len(paths))
    for i in range(len(a)):
        for j in range(len(a[i])):
            y[a[i][j]] = i
    # Build three phase-shifted ramps (length 99) as an RGB color spectrum.
    t = np.arange(0, 0.99, 0.01)
    yan = []
    for i in t:
        y_1 = sgn(i)
        yan.append(y_1)
    yan1 = demo(yan, 33)  # red channel
    yan2 = demo(yan, 66)  # green channel
    yan3 = yan            # blue channel
    X_pca = pca(X, 2)
    fig = plt.figure()
    fig.set_size_inches(10, 6)
    for c in range(k):
        m = np.where(y == c)
        cc = (yan1[math.floor(c / (k + 1) * 99)], yan2[math.floor(c / (k + 1) * 99)], yan3[math.floor(c / (k + 1) * 99)])
        # np.where returns a 1-tuple; take its index array directly (the
        # original unpacked it with a do-nothing loop).
        item = m[0]
        for jj in item:
            plot(X_pca[jj, 0], X_pca[jj, 1], 'b.', color=cc)
    plt.axis('off')
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.title('PCA')
    buf = io.BytesIO()
    # BUG FIX: save BEFORE plt.show() — some backends clear the figure on
    # show(), which left the encoded image blank.  This also matches the
    # order used by data_plot and zhibiaotu.
    plt.savefig(buf, format='png')
    buf.seek(0)
    if enable_debug_preview:
        plt.show()
    image_base64_pca = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_pca
    
def data_save(f_dir, f_dir1, a):
    """Copy each clustered file from f_dir into a per-cluster subfolder of
    f_dir1 (created on demand).

    a : list of clusters; a[c] lists file indices in os.listdir(f_dir) order.
    """
    file_name = os.listdir(f_dir)
    k = len(a)
    # Invert the cluster lists into a per-file label array.
    y = np.zeros(len(file_name))
    for i in range(len(a)):
        for j in range(len(a[i])):
            y[a[i][j]] = i
    for c in range(k):
        # np.where returns a 1-tuple; take its index array directly (the
        # original unpacked it with a do-nothing loop).
        item = np.where(y == c)[0]
        if not os.path.exists(f_dir1 + '//' + str(c)):
            os.mkdir(f_dir1 + '//' + str(c))
        for i in item:
            shutil.copy(f_dir + '//' + file_name[i], f_dir1 + '//' + str(c))
# Compute clustering purity.
def purity_score(y_true, y_pred):
    """Clustering purity in [0, 1].

    For every predicted cluster, count its most common true label; the sum
    of those counts divided by the sample count is the purity.  Returns a
    plain float.  (The original used scipy.stats.mode(...)[1], which returns
    a 1-element array on older scipy and changed semantics with the
    keepdims default in scipy >= 1.11.)
    """
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    total = 0
    for label in np.unique(y_pred):
        mask = y_pred == label
        _, counts = np.unique(y_true[mask], return_counts=True)
        total += int(counts.max())   # size of the cluster's majority label
    return total / len(y_true)
# Silhouette coefficient over a precomputed distance matrix.
def calculate_silhouette_score(distance_matrix, cluster_labels):
    """Return the mean silhouette coefficient of cluster_labels, treating
    distance_matrix as precomputed pairwise distances."""
    return silhouette_score(distance_matrix, cluster_labels, metric='precomputed')

# Adjusted Rand Index between the true and predicted labelings.
def calculate_ari(y_true, y_pred):
    """Thin wrapper around sklearn's adjusted_rand_score."""
    return adjusted_rand_score(y_true, y_pred)

# Compute clustering quality metrics and render them as a bar chart.
def zhibiaotu(origin_datas, julei_datas, disjz,parameters):
    """Evaluate the clustering against the original labels.

    origin_datas : [path, true_label, data] triples from load_data.
    julei_datas  : [path, cluster_id, data] triples from data_biaoqian.
    disjz        : pairwise distance matrix for the silhouette score.

    Computes silhouette, purity and ARI, prints them, draws a bar chart and
    returns it as a base64-encoded PNG string.
    """
    enable_debug_preview=parameters.get("__ENABLE_DEBUG_PREVIEW__",False)
    # True labels, encoded to 0..n-1.
    origin_labels = [data[1] for data in origin_datas]
    le = LabelEncoder()
    origin_labels_encoded = le.fit_transform(origin_labels)


    # Predicted cluster labels.
    cluster_labels = [data[1] for data in julei_datas]

    # NOTE(review): purity/ARI below assume origin_datas and julei_datas list
    # the same samples in the same order — verify against the callers.
    assert len(disjz) == len(cluster_labels), "Distance matrix and labels must have the same number of samples"

    # Silhouette coefficient on the distance matrix.
    silhouette_avg = calculate_silhouette_score(disjz, cluster_labels)

    # Purity.
    purity = purity_score(origin_labels_encoded, cluster_labels)

    # Adjusted Rand Index.
    ari = calculate_ari(origin_labels_encoded, cluster_labels)


    # Report the metrics.
    print(f"Silhouette Score: {silhouette_avg}")
    print(f"Purity: {purity}")
    print(f"Adjusted Rand Index (ARI): {ari}")

    # Bar chart of the three metrics.
    labels = ['Purity', 'Silhouette', 'ARI']
    values = [purity, silhouette_avg, ari]

    plt.bar(labels, values, color=['blue', 'green', 'red'])
    plt.ylabel('Scores')
    plt.title('Clustering Performance Metrics')
    if enable_debug_preview:
        plt.show()

    # Encode the chart as a base64 PNG.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    image_base64_index = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_index

def CancelTrain():
    """Request that training stop: raise the module-level cancel flag that
    the CheckCancel callback polls after every batch."""
    # BUG FIX: without `global`, the assignment only created a local
    # variable and the module-level flag never changed, so cancellation
    # silently did nothing.
    global __CANCEL_WAITHANDLE__
    __CANCEL_WAITHANDLE__ = True
# Custom Keras callback: stop training when the module-level cancel flag is set.
class CheckCancel(Callback):
    def on_batch_end(self, batch, logs=None):
        # Polled after every batch so a cancel request takes effect quickly.
        global __CANCEL_WAITHANDLE__
        if __CANCEL_WAITHANDLE__:
            self.model.stop_training = True
            print("Training stopped due to __CANCEL_WAITHANDLE__ being True")
def reset_canceltrain():
    """Clear the module-level cancel flag before starting a new training run."""
    # BUG FIX: needs `global`, otherwise this only bound a local name and
    # the flag stayed set from a previous cancellation.
    global __CANCEL_WAITHANDLE__
    __CANCEL_WAITHANDLE__ = False

__CANCEL_WAITHANDLE__=False



def main1(parameters):
    """Feature 1: load a single trajectory .npy file and render it as a
    scatter plot.

    parameters["data_path"] : path to one .npy trajectory file.

    Returns {"base64ImgStr": <base64 PNG>, "traj_data": <raw array>}.
    """
    enable_debug_preview=parameters.get("__ENABLE_DEBUG_PREVIEW__",False)
    # Configure Matplotlib fonts so Chinese labels render.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly

    # Load the single trajectory file.
    data_path = parameters["data_path"]  # string: one .npy file
    df = np.load(data_path)

    # Columns 1 and 2 hold latitude and longitude.
    latitudes = df[:, 1]
    longitudes = df[:, 2]

    # Sanity-check the coordinate ranges.
    print("Latitude range:", latitudes.min(), latitudes.max())
    print("Longitude range:", longitudes.min(), longitudes.max())

    # Draw the trajectory points.
    plt.figure(figsize=(8, 6))
    plt.scatter(latitudes, longitudes, c='r', marker='o', s=50, label='航迹点')
    plt.xticks([])
    plt.yticks([])
    plt.axis('off')
    plt.title('单条航迹')
    plt.xlabel('Latitude')
    plt.ylabel('Longitude')
    plt.legend()
    plt.grid(True)
    if enable_debug_preview:
        plt.show()

    # Encode the figure as a base64 PNG.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    image_base64_traj = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    # Return the encoded image plus the raw data.
    traj_data = df
    return {
        "base64ImgStr":image_base64_traj,
        "traj_data":traj_data
    }

def main2(parameters):
    """Feature 2: full pipeline — train the siamese model, cluster the
    trajectories, and render the cluster / PCA / metric figures.

    Returns a dict with three base64 PNG strings plus the labeled
    trajectory list.
    """
    reset_canceltrain()
    model=xunlian(parameters)
    a,X=xunlian2(parameters,model)

    image_base64_julei=data_plot(parameters,a,300,3)
    image_base64_pca=pca_plot(parameters,X,a)
    julei_datas=data_biaoqian(parameters,a)

    # Reload the original labeled data for the evaluation metrics.
    origin_datas = load_data(parameters)

    image_base64_index=zhibiaotu(origin_datas,julei_datas,X,parameters)

    return {
        "base64Imgcluster":image_base64_julei,          # multi-trajectory cluster plot
        "base64ImgPca":image_base64_pca,                # PCA scatter of the trajectories
        "cluster_traj_data":julei_datas ,                  # list of [path, group, trajectory] entries
        "base64ImgIdx":image_base64_index               # metrics bar chart
    }




if __name__=="__main__":


    # Collect every preprocessed trajectory .npy file.
    path= r"C:\Users\22377\Desktop\聚类\预处理"
    paths=[]
    length=300
    cc=3
    for alphabet in os.listdir(path):
        alphabet_path=os.path.join(path,alphabet)
        paths.append(alphabet_path)

        # Labeled data: one .npy per labeled trajectory, with the class
        # label encoded in the file name.
    labeldata_path=r"C:\Users\22377\Desktop\聚类\标签数据"
    labeldata_paths=[]
    for alphabet in os.listdir(labeldata_path):
        labelalphabet_path=os.path.join(labeldata_path,alphabet)
        labeldata_paths.append(labelalphabet_path)

    parameters_1 = {

        "data_path": r'C:\Users\22377\Desktop\聚类\预处理\Variflight_CCA1919_20210316.xls1.npy',     # string: one .npy trajectory file
        "__ENABLE_DEBUG_PREVIEW__":True,                            # allow interactive debug output (figures, windows)
    }


    parameters_2 = {
        "__ENABLE_DEBUG_PREVIEW__":True,
        "paths":paths ,                                                                   # absolute paths of all trajectories
        "labeldata_paths":labeldata_paths,                                                # labeled-data paths
        "k":0,                                                                      # int >= 0; selects the clustering method (0 = density-based)
        "load_weights_path":r'C:\Users\22377\Desktop\wujianduzibianmaqi_weights.h5',        # pretrained weights path
        "load_model_path":r'C:\Users\22377\Desktop\wujianduzibianmaqi_model.h5',         # model save path
        "train_epochs":80,                                                                # training epochs (< 120)
        "Lr":0.01,                                                                         # initial learning rate (< 0.1)
        "load_disjz_path":r'C:\Users\22377\Desktop\disjz.txt',
    }


    result_1=main1(parameters_1)
    # result_1: {
    #     "base64ImgStr": <base64 PNG of the single trajectory>,
    #     "traj_data": <raw trajectory array>
    # }


    result_2=main2(parameters_2)
    # result_2: {
    #     "base64Imgcluster": <cluster plot>,
    #     "base64ImgPca": <PCA scatter>,
    #     "cluster_traj_data": list of [path, group, trajectory] entries,
    #     "base64ImgIdx": <metrics bar chart>
    # }