from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import os,shutil
from warnings import simplefilter
simplefilter(action='ignore',category=FutureWarning)
from numpy.linalg import eig
import keras
import math
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot#,savefig
import io
from sklearn import preprocessing
import base64
import random
import tensorflow as tf
# Cap TensorFlow's GPU memory so this process coexists with others on the card.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
  try:
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*4)])  # limit GPU memory use to 4 GB
    logical_gpus = tf.config.experimental.list_logical_devices('GPU')
    print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
  except RuntimeError as e:
    # Virtual devices must be configured before the GPU is initialised.
    print(e)
from PIL import Image, ImageDraw
tf.keras.backend.clear_session()  # drop any state left from a previous Keras session
import re
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential, Model
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils, plot_model
from keras.optimizers import Adam
from keras.layers import Dense, Dropout, Flatten, Conv2D, Conv1D, MaxPooling2D, MaxPooling1D, LeakyReLU, Reshape, BatchNormalization, CuDNNGRU, LSTM, Activation, UpSampling2D, UpSampling1D
import random as rd
import tensorflow as tf
from keras import backend as K
from PIL import Image
import os
import cv2
import matplotlib.image as mpimg
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import silhouette_score, adjusted_rand_score
from matplotlib.lines import Line2D
from keras.callbacks import Callback, LearningRateScheduler
def initcenter(x, k):
    """Pick k distinct random sample indices as initial cluster centres.

    x: sequence of samples (only its length is used).
    k: number of centres; must satisfy k <= len(x).
    Returns a list of k unique indices in [0, len(x)).

    Bug fix: the original drew from np.random.choice(len(x)-1), so the last
    sample could never be selected as an initial centre.
    """
    # Seed from the fractional part of the wall clock so repeated calls differ
    # (kept from the original implementation).
    frac, _ = math.modf(time.time())
    np.random.seed(int(frac * 1000))
    # replace=False guarantees the k centres are distinct — no rejection loop needed.
    return list(np.random.choice(len(x), size=k, replace=False))

def nearest(kc, i, k, disjz):
    """Return the index (0..k-1) of the centre in kc closest to sample i.

    Distances come from the precomputed matrix disjz; ties resolve to the
    first minimum, matching argmin semantics.
    """
    dists = [disjz[kc[n], i] for n in range(k)]
    return np.argmin(dists)

def xclassify(x, y, kc, k, disjz):
    """Assign every sample in x to its nearest centre.

    Writes the cluster id of each sample into y in place and returns y.
    """
    for pos, sample in enumerate(x):
        y[pos] = nearest(kc, sample, k, disjz)
    return y

def newcenten(m, ycen, disjz):
    """Return the medoid of one cluster.

    m is a np.where(...) result; the member with the smallest summed distance
    to all other members becomes the new centre.  When the cluster is empty
    the previous centre ycen is kept (a cluster may contain only itself).
    """
    members = list(m)[-1]  # the original iterated m and kept its last element
    best_cen = ycen
    best_total = float('inf')
    for i in members:
        total = sum(disjz[i, n] for n in members)
        if best_total > total:
            best_total = total
            best_cen = i
    return best_cen

def kcmean(x, y, kc, k, disjz):
    """Recompute the medoid of every cluster.

    Returns (new_centres, changed): changed is True when at least one cluster
    centre moved, which the caller uses as the convergence flag.
    """
    centres = list(kc)
    changed = False
    for c in range(k):
        members = np.where(y == c)
        candidate = newcenten(members, centres[c], disjz)
        if centres[c] != candidate:
            centres[c] = candidate
            changed = True  # a cluster centre moved
    return (np.array(centres), changed)
def pca(X, k):
    """Project X onto its k largest principal components.

    X: (n_samples, n_features) array; k: number of components to keep.
    Returns an (n_samples, k) array.

    Bug fix: np.linalg.eig returns eigenvectors as the COLUMNS of its second
    result.  The original indexed rows (eigenvectors[klarge_index]), which
    projects onto the wrong directions whenever the covariance matrix is not
    diagonal.
    """
    X = X - X.mean(axis=0)                       # centre the data
    X_cov = np.cov(X.T, ddof=0)                  # covariance matrix
    eigenvalues, eigenvectors = eig(X_cov)
    klarge_index = eigenvalues.argsort()[-k:][::-1]  # indices of the k largest eigenvalues
    k_eigenvectors = eigenvectors[:, klarge_index]   # eigenvectors are columns
    return np.dot(X, k_eigenvectors)
def deifen(y, disjz):
    """Mean silhouette coefficient of the partition y over distance matrix disjz.

    For each sample: intra = mean distance to its own cluster (self included),
    inter = smallest mean distance to any other cluster; the score is
    (inter - intra) / max(intra, inter), or 0 when the sample sits alone in
    its cluster.  Returns the average over all samples.
    """
    scores = []
    labels = set(y)
    for i in range(len(disjz)):
        intra = 0
        inter = []
        for lab in labels:
            members = np.where(y == lab)[-1]
            avg = sum(disjz[i, j] for j in members) / len(members)
            if (members == i).any():
                intra = avg          # sample i's own cluster
            else:
                inter.append(avg)    # some other cluster
        if inter == []:
            print(y)
        best_other = min(inter)
        if intra == 0:
            # Singleton cluster (only distance is to itself): score 0.
            scores.append(0)
        else:
            scores.append((best_other - intra) / max(intra, best_other))
    return np.mean(scores)

def julei(k,x,yi,disjz):             # k: cluster count; x: sample indices 0..N-1; yi: initial label array
    """K-medoids-style clustering on the precomputed distance matrix disjz.

    Runs 10 random restarts; each restart iterates assign/update until the
    medoids stop moving or 10 inner iterations pass.  The partition with the
    best silhouette score (deifen) wins.  Returns (b, kcr): b is a list of k
    index lists (one per cluster), kcr the winning medoid indices.

    NOTE(review): `y=yi` aliases the caller's array, so yi is mutated in place
    and shared across restarts — confirm callers do not rely on yi's contents.
    """
    a=-float('inf')
    for i in range(10):                           # 10 restarts, keep the best-scoring one
        kc=initcenter(x,k)
        y=yi
        flag=True
        count=1
        while flag:
            count=1+count
            y = xclassify(x,y,kc,k,disjz)                     # y holds the cluster id of every sample
            kc,flag = kcmean(x,y,kc,k,disjz)                 # flag goes False when medoids stabilise, or after 10 iterations below
            print(flag)
            if count>10:
                flag=False
                print('z')
        if deifen(y,disjz)>a:
            yrr=y.copy()
            a=deifen(y,disjz)
            kcr=kc
    # Convert the flat label array into one index list per cluster.
    b=[]
    for j in range(k):
        a=[]
        for i in range(len(disjz)):
            if (yrr[i])==j:
                a.append(i)
        b.append(a)
    return b,kcr

def shaixuan(path, f_dir1, length, width, yuzhi):
    """Filter trajectory files by time-gap regularity.

    For every .npy file in `path`, load the first `length` rows and `width`
    columns (column 0 is assumed to be time).  A file is copied into f_dir1
    only when its largest consecutive time gap stays below `yuzhi` times the
    mean sampling interval.

    Fixes: hard-coded '\\\\' / '//' separators replaced with os.path.join so
    the function works off Windows; loop bound clamped to the actual row
    count (the original raised IndexError for files shorter than `length`);
    unused `lines` list and MinMaxScaler removed.
    """
    for b in os.listdir(path):
        src = os.path.join(path, b)
        df1 = np.load(src)[0:length, 0:width]
        t_max = df1[len(df1) - 1, 0]
        t_min = df1[0, 0]
        t_mean = (t_max - t_min) / length          # mean sampling interval
        t_qmax = 0                                 # largest consecutive gap
        lin = 0
        for i in range(0, min(len(df1), length) - 1):
            lin = df1[i + 1, 0] - df1[i, 0]
            if lin > t_qmax:
                t_qmax = lin
        # Debug output kept from the original implementation.
        print(lin)
        print(t_qmax)
        print(yuzhi * t_mean)
        if t_qmax < yuzhi * t_mean:
            shutil.copy(src, os.path.join(f_dir1, str(b)))
def find_optimal_k(data, k_range, parameters):
    """Return the K in k_range with the highest K-means silhouette score.

    Also plots the silhouette curve (shown only when the debug-preview flag
    is set in parameters).  `data` is treated as a precomputed distance
    matrix for the silhouette computation.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    best_score = -1
    optimal_k = None
    scores = []
    for k in k_range:
        labels = KMeans(n_clusters=k, random_state=42).fit_predict(data)
        score = silhouette_score(data, labels, metric='precomputed')
        scores.append(score)
        if score > best_score:
            best_score = score
            optimal_k = k

    plt.plot(k_range, scores, 'bx-')
    plt.xlabel('Number of Clusters (K)')
    plt.ylabel('Silhouette Score')
    plt.title('Silhouette Score for Each K')
    if enable_debug_preview:
        plt.show()
    plt.clf()
    plt.close()
    return optimal_k
def find_optimal_k_elbow(data, k_range, parameters):
    """Choose K via the elbow method on K-means inertia.

    The K following the largest relative drop in distortion is returned
    (None when fewer than three K values were tried).  The elbow curve is
    plotted, and shown when the debug-preview flag is set.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    distortions = [KMeans(n_clusters=k, random_state=42).fit(data).inertia_ for k in k_range]

    optimal_k = None
    if len(distortions) > 2:
        # Relative drop between consecutive K values; the biggest drop marks the elbow.
        rates = [(prev - cur) / prev for prev, cur in zip(distortions, distortions[1:])]
        optimal_k = k_range[rates.index(max(rates)) + 1]

    plt.plot(k_range, distortions, 'bx-')
    plt.xlabel('Number of Clusters (K)')
    plt.ylabel('Distortion')
    plt.title('Elbow Method for Optimal K')
    if enable_debug_preview:
        plt.show()
    plt.clf()
    plt.close()

    return optimal_k
def demo(lst, k):
    """Rotate lst left by k positions and return the new list."""
    head, tail = lst[:k], lst[k:]
    return tail + head
def sgn(x):
    """Piecewise trapezoid used to build colour spectra.

    0 on [0, 1/3), ramps up 6x-2 on [1/3, 1/2), 1 on [1/2, 5/6), ramps down
    -6x+6 on [5/6, 1].  Returns None outside [0, 1] (matching the original).
    """
    if 0 <= x < 1 / 3:
        return 0
    if 1 / 2 <= x < 5 / 6:
        return 1
    if 1 / 3 <= x < 1 / 2:
        return 6 * x - 2
    if 5 / 6 <= x <= 1:
        return -6 * x + 6
def gai(x, q):
    """Occlude a random square patch of a 108x108x3 image (or batch).

    A mask of ones with a zeroed square of side int(q*108) at a random
    position is multiplied into x.  When the square would run past an edge
    it wraps: the corner pieces are zeroed exactly as in the original code.
    Consumes two rd.randint draws (row then column), preserving RNG order.
    """
    side = 108
    r0 = rd.randint(0, side - 1)
    c0 = rd.randint(0, side - 1)

    mask = np.ones([108, 108, 3])
    num = int(q * side)

    r_wrap = r0 + num >= side
    c_wrap = c0 + num >= side
    if r_wrap and c_wrap:
        mask[r0:side, c0:side, :] = 0
        mask[0:r0 + num - side, 0:c0 + num - side, :] = 0
    elif r_wrap:
        mask[r0:side, c0:c0 + num, :] = 0
        mask[0:r0 + num - side, c0:c0 + num, :] = 0
    elif c_wrap:
        mask[r0:r0 + num, c0:side, :] = 0
        mask[r0:r0 + num, 0:c0 + num - side, :] = 0
    else:
        mask[r0:r0 + num, c0:c0 + num, :] = 0

    return mask * x
def draw_trajectory(trajectory, image_size=(512, 512)):
    """Render a (lat, lon) trajectory as a grayscale line image.

    trajectory: iterable of points whose first two fields are latitude and
    longitude (extra fields are ignored).  Returns a PIL 'L' image with the
    track drawn in white on black.

    Fixes: a degenerate extent (single point or axis-aligned track) no longer
    raises ZeroDivisionError, and the points are built from the already
    extracted lats/lons instead of `trajectory[:,]`, which required a numpy
    array with exactly two columns.
    """
    image = Image.new('L', image_size, 0)  # black background
    draw = ImageDraw.Draw(image)

    # Bounding box of the track.
    lats = [point[0] for point in trajectory]
    lons = [point[1] for point in trajectory]
    lat_min, lat_max = min(lats), max(lats)
    lon_min, lon_max = min(lons), max(lons)
    # Guard against a zero-width/zero-height extent.
    lat_span = (lat_max - lat_min) or 1.0
    lon_span = (lon_max - lon_min) or 1.0

    # Map geographic coordinates onto pixel coordinates.
    def normalize(lat, lon):
        x = int((lon - lon_min) / lon_span * image_size[0])
        y = int((lat - lat_min) / lat_span * image_size[1])
        return x, y

    points = [normalize(lat, lon) for lat, lon in zip(lats, lons)]
    draw.line(points, fill=255, width=2)

    return image
def letterbox_image(image, size):
    """Resize without distortion: fit the image inside `size`, pad with black.

    Returns a float32 HxWx3 array scaled to [0, 1].
    """
    image = image.convert("RGB")
    src_w, src_h = image.size
    dst_w, dst_h = size
    scale = min(dst_w / src_w, dst_h / src_h)
    new_w = int(src_w * scale)
    new_h = int(src_h * scale)
    resized = image.resize((new_w, new_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (0, 0, 0))
    # Centre the resized image on the black canvas.
    canvas.paste(resized, ((dst_w - new_w) // 2, (dst_h - new_h) // 2))
    return np.asarray(canvas).astype(np.float32) / 255

def autoencoder(parameters,result_1):
    """Train a denoising convolutional autoencoder on rasterised trajectory
    images, embed every trajectory with the 'd1' bottleneck layer, build a
    pairwise squared-distance matrix over the embeddings, and cluster it.

    parameters: dict with "k" (0 selects the DBSCAN branch, anything else the
        julei branch with the automatically chosen K), "train_epochs",
        "train_batch_size", "saved_weights_path", "saved_model_path".
    result_1: dict produced by main1, with "selected_datas"
        ([path, label, array] triples) and "q" (occlusion ratio).
    Returns (a, disjz): list of clusters (each a list of sample indices) and
    the full pairwise distance matrix.
    """
    k=parameters["k"]
    datas=result_1["selected_datas"]
    lines=[]
    q=result_1["q"]
    '''for alphabet in paths:                          #试图保存完整的dir来作为x的输入，对于大数据来说应该是一个好方法。
        line = np.load(alphabet)
        line = np.delete(line, 0, axis=1)

        lines.append(line)
                   #path对应目标数据，38条为单位
    '''
    # Drop the first (time) column of every trajectory; keep lat/lon only.
    for item in datas:
        line=np.delete(item[2], 0, axis=1)
        lines.append(line)

    size=(108,108)
    X_test=[]
    for trajectory in lines:
        img = draw_trajectory(trajectory)
        image2 = letterbox_image(img, size)
        X_test.append(image2)

    X_test = np.array(X_test)                  # prediction set, kept in original order so clustering maps back to samples
    X_test=gai(X_test,q)

    from random import shuffle
    shuffle_index = np.arange(len(lines), dtype=np.int32)
    shuffle(shuffle_index)
    # NOTE(review): np.object was removed in NumPy 1.24+ — newer NumPy needs dtype=object here.
    train_lines = np.array(lines,dtype=np.object)
    train_lines = train_lines[shuffle_index]

    X_test2=[]                                                  # shuffled copy used for training
    for i in train_lines:
        img=draw_trajectory(i)
        image2=letterbox_image(img, size)
        X_test2.append(image2)
    X_test2=np.array(X_test2)


    def generatebatch(X, Y, batch_size):
        # Endless generator of (freshly occluded inputs, clean targets) batches.
        while True:
            # Make sure X and Y have enough rows for one batch.
            if X.shape[0] < batch_size or Y.shape[0] < batch_size:
                raise ValueError(f"X or Y has fewer rows ({X.shape[0]}, {Y.shape[0]}) than batch size ({batch_size}).")

            # Make sure the random batch index range is valid.
            max_index = X.shape[0] // batch_size - 1
            if max_index < 0:
                raise ValueError(f"X has fewer rows ({X.shape[0]}) than batch size ({batch_size}).")

            batch_i = rd.randint(0, max_index)
            start = batch_i * batch_size
            end = start + batch_size

            batch_xs = []
            for i in range(batch_size):
                # Each sample gets a new random occlusion -> denoising-style training.
                batch_xs.append(gai(X[start + i],q))
            batch_xs = np.array(batch_xs)
            batch_ys = Y[start:end]

            yield batch_xs, batch_ys
                # (a backup of the network structure used to live here)
    import keras.backend as K
    from keras.callbacks import LearningRateScheduler

    def IoU(y_true, y_pred):                  # loss that up-weights bright (>0.5) target pixels by 5x

        a=5
        y_true=tf.where(y_true>0.5, x=y_true*a, y=y_true)
        y_pred=tf.where(y_true>0.5, x=y_pred*a, y=y_pred)

        return K.mean(K.square(y_pred - y_true), axis=-1)


    def scheduler(epoch):
        # Halve the learning rate every 100 epochs.
        #if epoch % 20 == 0 and epoch != 0:
        if epoch % 100 == 0 and epoch != 0:
            lr = K.get_value(autoencoder.optimizer.lr)
            K.set_value(autoencoder.optimizer.lr, lr * 0.5)
            print("lr changed to {}".format(lr * 0.5))
        return K.get_value(autoencoder.optimizer.lr)

    reduce_lr = LearningRateScheduler(scheduler)

    lrr=0.01
    # Autoencoder architecture.
    input_img = keras.Input(shape=(108, 108, 3))
    x= Dropout(0)(input_img)
    # Encoder part.
    '''x = Conv2D(64, (4, 4), activation='relu', padding='same')(x)
    x = MaxPooling2D((4, 4), padding='same')(x)
    x = Conv2D(32, (4, 4), activation='relu', padding='same')(x)
    encoded = Dense(32, name='d1')(x)

    # 解码器部分
    x = Conv2D(32, (4, 4), activation='relu', padding='same')(encoded)
    x = UpSampling2D((4, 4))(x)
    x = Conv2D(64, (4, 4), activation='relu', padding='same')(x)
    decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same', name='d2')(x)
    '''
    x = Conv2D(6,(3,3),strides=(1,1),activation='relu', padding='same',name='dd')(x)
    x = MaxPooling2D((3, 3),padding='same')(x)
    x = Conv2D(12, (3,3),strides=(1,1), activation='relu', padding='same')(x)
    x = MaxPooling2D((3, 3),padding='same')(x)
    encoded = Dense(2,name='d1')(x) # bottleneck layer 'd1' — its output is extracted below as the embedding
    x = Dense(12,name='d3')(encoded)
    x = UpSampling2D((3, 3))(x)
    x = Conv2D(6, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((3, 3))(x)
    decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same',name='d2')(x)
    autoencoder = keras.Model(input_img, decoded)
    autoencoder.compile(Adam(lr=lrr), loss=IoU)#'binary_crossentropy'
    autoencoder.summary()
    # Train the autoencoder.
    train_epochs=parameters["train_epochs"]
    train_batch_size=parameters["train_batch_size"]
    #ba=32
    autoencoder.fit_generator(generatebatch(X_test2,X_test2,train_batch_size),
                    epochs=train_epochs,
                    steps_per_epoch=len(X_test2)/train_batch_size, # steps per epoch largely determines how far the loss can drop
                    #steps_per_epoch=len(X_test2)/train_batch_size, 
                    #batch_size=64,
                    #shuffle=True,
                    callbacks=[reduce_lr, CheckCancel()]#callbacks=[reduce_lr]#, early_stopping,tensorboard
                    )
    pre_model=Model(inputs=autoencoder.input,outputs=autoencoder.get_layer('d1').output)#get_layer('dd').
    predict_result3=pre_model.predict(X_test,batch_size=64)
    # Save the trained weights.
    saved_weights_path=parameters["saved_weights_path"]
    autoencoder.save_weights(saved_weights_path)

    # Save the encoder-only model.
    saved_model_path=parameters["saved_model_path"]
    model = Model(inputs=autoencoder.input, outputs=autoencoder.get_layer('d1').output)
    model.save(saved_model_path)
    def getDistance(x,y):
        # Squared Euclidean distance between two embeddings.
        distance=np.sum((x-y)**2)
        return distance

    # Symmetric pairwise distance matrix; -1 marks "not yet computed".
    disjz=-np.ones([len(X_test),len(X_test)])
    for i in range(len(X_test)):
        for j in range(i,len(X_test)):
            df1=predict_result3[i]
            df2=predict_result3[j]
            if disjz[i,j]==-1:
                disjz[i,j]=getDistance(df1,df2)
                disjz[j,i]=disjz[i,j]

    k_range = range(2,13)

    best_k = find_optimal_k_elbow(disjz, k_range,parameters)
    print("Optimal K value:", best_k)

    if k==0:
        # DBSCAN branch.
        # Radius derived from the observed distance scale.
        R=disjz.max()*0.01       #            neighbourhood radius
        mintr=4              # minimum neighbour count: more than mintr neighbours makes a core point
        neibor=[]
        for i in range(len(disjz)):
            linshi=[]
            for j in range(len(disjz)):
                if disjz[i,j]<R:
                    linshi.append(j)
            neibor.append(linshi)
        vistied=[]
        for i in range(len(disjz)):
            vistied.append(0)
        a=[]
        for i in range(len(disjz)):
            if vistied[i]!=1 and len(neibor[i])>mintr:   # pick an unvisited core point
                cu=[]
                for j in neibor[i]:
                    if vistied[j]!=1:
                        cu.append(j)
                        vistied[j]=1
                while True:
                    # NOTE(review): cu2 aliases cu (same list object), so the
                    # comparison below is always True and the while loop exits
                    # after one pass — cu2 = cu.copy() was probably intended.
                    cu2=cu
                    for ii in cu:
                        for jj in neibor[ii]:
                            if vistied[jj]!=1:
                                cu.append(jj)
                                vistied[jj]=1
                    if cu2==cu:
                        break
                a.append(cu)
    else:
        k=best_k
        x=[i for i in range(len(disjz))]              # samples are represented by their index 0..N-1
        y=np.zeros(len(disjz))
        [a,kc]=julei(k,x,y,disjz)


    return a,disjz
def data_biaoqian(result_1, a):
    """Attach cluster labels to the selected trajectories.

    result_1["selected_datas"] supplies the file paths; `a` is the list of
    clusters (index lists).  Returns [path, cluster_id, array] triples ordered
    by cluster, with each array reloaded from disk (first 3 columns).
    """
    paths = [item[0] for item in result_1["selected_datas"]]
    width = 3
    labeled = []
    y = np.zeros(len(paths))
    for cluster_id, members in enumerate(a):
        for sample_idx in members:
            y[sample_idx] = cluster_id

    for c in range(len(a)):
        for i in np.where(y == c)[-1]:
            df = np.load(paths[i])[:, 0:width]
            labeled.append([paths[i], c, df])
    return labeled
def data_plot(parameters, result_1, a, L, width):
    """Plot every trajectory coloured by cluster; return the figure as base64 PNG.

    `a` is the list of clusters (index lists); colours come from three shifted
    copies of the sgn() spectrum.  The plot is shown only when the
    debug-preview flag is set.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    num_clusters = len(a)
    datas = result_1["selected_datas"]
    paths = [entry[0] for entry in datas]
    cluster_of = np.zeros(len(paths), dtype=int)
    for cluster_id, members in enumerate(a):
        for sample_idx in members:
            cluster_of[sample_idx] = cluster_id

    # Three colour channels: shifted copies of the same triangle spectrum.
    base = [sgn(v) for v in np.arange(0, 0.99, 0.01)]
    reds = demo(base, 33)
    greens = demo(base, 66)
    blues = base

    fig = plt.figure()
    ax = fig.gca()
    fig.set_size_inches(10, 6)

    legend_elements = []
    for cluster_id in range(num_clusters):
        pos = math.floor(cluster_id / (num_clusters + 1) * 99)
        colour = (reds[pos], greens[pos], blues[pos])

        for sample_idx in np.where(cluster_of == cluster_id)[0]:
            track = datas[sample_idx][2][:, 0:width]
            ax.plot(track[:, 1], track[:, 2], linewidth=0.5, color=colour, alpha=0.8)

        legend_elements.append(Line2D([0], [0], color=colour, lw=4, label=f'Cluster {cluster_id}'))

    ax.legend(handles=legend_elements)

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    if enable_debug_preview:
        plt.show()
    image_base64_julei = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_julei
def pca_plot(parameters, X, a):
    """Scatter-plot the 2-D PCA projection of X coloured by cluster.

    Returns the figure as a base64-encoded PNG; shows it only when the
    debug-preview flag is set.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    num_clusters = len(a)
    paths = parameters["paths"]
    cluster_of = np.zeros(len(paths))
    for cluster_id, members in enumerate(a):
        for sample_idx in members:
            cluster_of[sample_idx] = cluster_id
    # Three colour channels built from shifted sgn() spectra.
    base = [sgn(v) for v in np.arange(0, 0.99, 0.01)]
    reds = demo(base, 33)
    greens = demo(base, 66)
    blues = base
    X_pca = pca(X, 2)

    fig = plt.figure()
    plt.clf()
    fig.set_size_inches(10, 6)
    for cluster_id in range(num_clusters):
        pos = math.floor(cluster_id / (num_clusters + 1) * 99)
        colour = (reds[pos], greens[pos], blues[pos])
        for sample_idx in np.where(cluster_of == cluster_id)[-1]:
            plot(X_pca[sample_idx, 0], X_pca[sample_idx, 1], 'b.', color=colour)
    plt.axis('off')
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.title('PCA')
    if enable_debug_preview:
        plt.show()
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    image_base64_pca = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_pca
    
def data_save(f_dir, f_dir1, a):
    """Copy the files of f_dir into per-cluster subfolders of f_dir1.

    `a` lists, per cluster, the indices into os.listdir(f_dir).  Subfolder
    names are the cluster ids ('0', '1', ...); missing folders are created.
    """
    file_name = os.listdir(f_dir)
    y = np.zeros(len(file_name))
    if not os.path.exists(f_dir1):
        os.mkdir(f_dir1)
    for cluster_id, members in enumerate(a):
        for sample_idx in members:
            y[sample_idx] = cluster_id
    for c in range(len(a)):
        target = f_dir1 + '//' + str(c)
        if not os.path.exists(target):
            os.mkdir(target)
        for i in np.where(y == c)[-1]:
            shutil.copy(f_dir + '//' + file_name[i], target)
def extract_label(path):
    """Extract the flight label from a 'Variflight_<label>_YYYYMMDD.xlsN.npy' path.

    Raises ValueError when the filename does not match the expected pattern.
    """
    match = re.search(r'Variflight_(\w+)_\d{8}\.xls\d+\.npy$', path)
    if match is None:
        raise ValueError(f"无法从路径 {path} 中提取标签")
    return match.group(1)

def load_data(parameters):
    """Load every trajectory file named in parameters["paths"].

    Each file is truncated to the first parameters["length"] rows and must be
    a 2-D array with 3 columns; files whose name cannot be parsed or whose
    shape is wrong are reported and skipped.  Returns [path, label, data]
    triples.
    """
    length = parameters["length"]
    result = []
    for path in parameters["paths"]:
        try:
            label = extract_label(path)
            data = np.load(path)[0:length, :]
            if data.ndim != 2 or data.shape[1] != 3:
                raise ValueError(f"Data shape {data.shape} is not valid. Expected a 2D array with 3 features per point.")
            result.append([path, label, data])
        except ValueError as e:
            # Skip unparsable or malformed files but keep going.
            print(e)

    return result
def select_data(origin_datas, category, number):
    """Randomly pick `category` classes and `number` samples spread evenly.

    origin_datas holds [path, label, data] triples.  Raises ValueError when
    more classes are requested than exist, or when `number` exceeds what the
    smallest class can evenly supply.  RNG draw order matches the original:
    one sample() for the classes, then one sample() per chosen class.
    """
    all_categories = set(data[1] for data in origin_datas)

    if category > len(all_categories):
        raise ValueError(f"输入的种类数 {category} 超过了实际类别数 {len(all_categories)}")

    # The smallest class bounds how many samples each class can contribute.
    category_counts = {cat: len([data for data in origin_datas if data[1] == cat]) for cat in all_categories}
    min_per_category = min(category_counts.values())
    if number > category * min_per_category:
        raise ValueError(f"输入的条数 {number} 超过了允许的最大值 {category * min_per_category}")

    selected_categories = random.sample(list(all_categories), category)

    # Bucket the samples of the chosen classes.
    category_data = {cat: [] for cat in selected_categories}
    for path, label, data in origin_datas:
        if label in selected_categories:
            category_data[label].append([path, label, data])

    per_category_number, remaining = divmod(number, category)

    selected_datas = []
    for cat in selected_categories:
        # The first `remaining` classes each take one extra sample.
        if remaining > 0:
            n = per_category_number + 1
            remaining -= 1
        else:
            n = per_category_number
        selected_datas.extend(random.sample(category_data[cat], min(n, len(category_data[cat]))))

    return selected_datas
def generate_missing_data(selected_datas, q):
    """Simulate data loss on every trajectory.

    A contiguous window of ratio q is zeroed at a random position (shifted
    back when it would overrun the end), then rows whose latitude or
    longitude became zero are dropped.  Samples left empty are discarded.
    Consumes one rd.randint per trajectory, matching the original.
    """

    def _mask(data, num_points, q):
        num_masked = int(q * num_points)
        start = rd.randint(0, num_points - 1)
        end = start + num_masked
        # Shift the window back if it would run past the end.
        if end >= num_points:
            start = num_points - num_masked
            end = num_points
        out = data.copy()
        out[start:end, :] = 0
        return out

    selected_lost_datas = []
    for path, label, data in selected_datas:
        lost = _mask(data, len(data), q)
        # Keep only rows where both latitude and longitude are non-zero.
        kept = lost[(lost[:, 1] != 0) & (lost[:, 2] != 0)]
        if kept.size > 0:
            selected_lost_datas.append([path, label, kept])

    return selected_lost_datas
# Clustering purity metric.
def purity_score(y_true, y_pred):
    """Purity of a clustering: for every predicted cluster, count the most
    common true label, and divide the total by the number of samples."""
    from scipy.stats import mode
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    total = sum(mode(y_true[y_pred == c])[1] for c in np.unique(y_pred))
    return total / len(y_true)
# Silhouette metric over a precomputed distance matrix.
def calculate_silhouette_score(distance_matrix, cluster_labels):
    """Mean silhouette coefficient of `cluster_labels` given the precomputed
    pairwise distance matrix."""
    return silhouette_score(distance_matrix, cluster_labels, metric='precomputed')

# Adjusted Rand Index metric.
def calculate_ari(y_true, y_pred):
    """Adjusted Rand Index between ground-truth and predicted labels."""
    return adjusted_rand_score(y_true, y_pred)

# Metrics bar chart.
def zhibiaotu(origin_datas, julei_datas, disjz, parameters):
    """Compute clustering quality metrics (silhouette, purity, ARI) and
    return a bar chart of them as a base64-encoded PNG."""
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    # Encode the ground-truth labels as 0..n-1 integers.
    le = LabelEncoder()
    origin_labels_encoded = le.fit_transform([d[1] for d in origin_datas])

    # Predicted cluster ids.
    cluster_labels = [d[1] for d in julei_datas]

    assert len(disjz) == len(cluster_labels), "Distance matrix and labels must have the same number of samples"

    silhouette_avg = calculate_silhouette_score(disjz, cluster_labels)
    purity = purity_score(origin_labels_encoded, cluster_labels)
    ari = calculate_ari(origin_labels_encoded, cluster_labels)

    print(f"Silhouette Score: {silhouette_avg}")
    print(f"Purity: {purity}")
    print(f"Adjusted Rand Index (ARI): {ari}")

    metric_names = ['Purity', 'Silhouette', 'ARI']
    metric_values = [purity, silhouette_avg, ari]
    plt.bar(metric_names, metric_values, color=['blue', 'green', 'red'])
    plt.ylabel('Scores')
    plt.title('Clustering Performance Metrics')
    if enable_debug_preview:
        plt.show()

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    image_base64_index = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_index
def CancelTrain():
    """Request cooperative cancellation of the current training run.

    Bug fix: the original assigned to a LOCAL __CANCEL_WAITHANDLE__, so the
    module-level flag polled by CheckCancel never changed and training could
    never actually be cancelled.
    """
    global __CANCEL_WAITHANDLE__
    __CANCEL_WAITHANDLE__ = True
# Custom Keras callback: polled after every batch so training can be aborted
# cooperatively via the module-level __CANCEL_WAITHANDLE__ flag.
class CheckCancel(Callback):
    """Stops training when __CANCEL_WAITHANDLE__ becomes True."""
    def on_batch_end(self, batch, logs=None):
        global __CANCEL_WAITHANDLE__
        if __CANCEL_WAITHANDLE__:
            self.model.stop_training = True
            print("Training stopped due to __CANCEL_WAITHANDLE__ being True")
def reset_canceltrain():
    """Clear the cancellation flag before a new training run starts.

    Bug fix: without `global`, the original assignment only created a local
    variable and the module-level flag was never reset.
    """
    global __CANCEL_WAITHANDLE__
    __CANCEL_WAITHANDLE__ = False

__CANCEL_WAITHANDLE__=False







def main1(parameters):
    """Stage 1: load, sub-sample and degrade the trajectory data set.

    Returns a dict with a per-category histogram (base64 PNG), the degraded
    samples, and the occlusion ratio q (forwarded to later stages).
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    origin_datas = load_data(parameters)
    selected_datas = select_data(origin_datas, parameters["category"], parameters["number"])

    # Count how many samples each label received.
    label_counts = {}
    for entry in selected_datas:
        label = entry[1]
        label_counts[label] = label_counts.get(label, 0) + 1

    labels = list(label_counts.keys())
    counts = list(label_counts.values())

    plt.bar(labels, counts, tick_label=labels)
    plt.xlabel('Category')
    plt.ylabel('Number of Data Points')
    plt.title('Data Distribution by Category')

    # Annotate each bar with its sample count.
    for i, count in enumerate(counts):
        plt.text(i, count, str(count), ha='center', va='bottom')
    if enable_debug_preview:
        plt.show()
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    image_base64_cata = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    q = parameters["q"]
    selected_lost_datas = generate_missing_data(selected_datas, q)
    return {
        "base64ImgStr": image_base64_cata,
        "selected_datas": selected_lost_datas,
        "q": q,
    }
    

def main2(parameters, result_1):
    """Stage 2: render one selected trajectory as a scatter plot.

    Returns the plot as base64 PNG plus the raw trajectory loaded from
    parameters["data_path"].
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    # Chinese-capable font for the plot title/legend; keep the minus sign readable.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False

    # Raw trajectory file (single .npy path).
    df = np.load(parameters["data_path"])

    # Sample index 21 of the degraded selection is the one plotted.
    data = result_1["selected_datas"][20][2]
    latitudes = data[:, 1]
    longitudes = data[:, 2]

    plt.figure(figsize=(8, 6))
    plt.scatter(latitudes, longitudes, c='r', marker='o', s=10, label='航迹点')
    plt.xticks([])
    plt.yticks([])
    plt.axis('off')
    plt.title('单条航迹')
    plt.xlabel('Latitude')
    plt.ylabel('Longitude')
    plt.legend()
    plt.grid(True)
    if enable_debug_preview:
        plt.show()

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    image_base64_traj = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return {
        "base64ImgStr": image_base64_traj,
        "traj_data": df
    }
def main3(parameters, result_1):
    """Stage 3: run the autoencoder + clustering pipeline.

    Saves the pairwise distance matrix to parameters["disjz_path"], labels the
    samples, computes the quality metrics and returns the figures as base64
    PNGs.  (As in the original, the final bar chart overwrites
    image_base64_julei, so both image keys carry the same figure.)
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    reset_canceltrain()
    a, X = autoencoder(parameters, result_1)

    image_base64_julei = data_plot(parameters, result_1, a, 300, 3)
    # Persist the distance matrix as tab-separated text.
    np.savetxt(parameters["disjz_path"], X, fmt='%.6f', delimiter='\t')

    julei_datas = data_biaoqian(result_1, a)

    origin_datas = result_1["selected_datas"]
    print(len(X))
    print(len(julei_datas))
    image_base64_index = zhibiaotu(origin_datas, julei_datas, X, parameters)

    # Count samples per cluster.
    label_counts = {}
    for entry in julei_datas:
        label = entry[1]
        label_counts[label] = label_counts.get(label, 0) + 1

    labels = list(label_counts.keys())
    counts = list(label_counts.values())

    plt.bar(labels, counts, tick_label=labels)
    plt.xlabel('Category')
    plt.ylabel('Number of Data Points')
    plt.title('Data Distribution by Category')

    # Annotate each bar with its sample count.
    for i, count in enumerate(counts):
        plt.text(i, count, str(count), ha='center', va='bottom')
    if enable_debug_preview:
        plt.show()
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    image_base64_julei = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return {
        "base64Imgcluster": image_base64_julei,          # multi-trajectory cluster figure (overwritten, see docstring)
        "cluster_traj_data": julei_datas,                # [path, cluster, trajectory] triples
        "base64ImgIdx": image_base64_index,              # metrics figure
        "base64Imgjulei": image_base64_julei,            # per-cluster count figure
    }



if __name__=="__main__":
    path= r"C:\Users\22377\Desktop\聚类\预处理"
    paths=[]
    for alphabet in os.listdir(path):

        alphabet_path=os.path.join(path,alphabet)
        paths.append(alphabet_path)

    # 数据导入部分，所有数据导入，选择数据，生成缺失数据
    parameters_1= {
        "paths":paths ,   #[绝对地址]绝对地址列表
        "length":300,
        "category":4,     #int类型，种类数，2<=n<=9
        "number":100,     #int类型，选取航迹数量，category<=n<=50*category
        "__ENABLE_DEBUG_PREVIEW__":True,                            #允许内部打印调试内容，如：图表，界面等（内部使用，无需实现）
        "q":0.2,          #float,缺失率
    }
    parameters_2 = {

        "data_path": r'C:\Users\22377\Desktop\聚类\预处理\Variflight_CCA1919_20210316.xls1.npy',     # string类型，数据导入的文件夹路径，文件夹应含有多条npy文件
        "__ENABLE_DEBUG_PREVIEW__":True,                            #允许内部打印调试内容，如：图表，界面等（内部使用，无需实现）
    }

    parameters_3 = {

        "__ENABLE_DEBUG_PREVIEW__":True,                                                                
        "k":2,                                                                            #int，决定聚类方法
        "saved_weights_path":r'C:\Users\22377\Desktop\kangqueshizibianmaqi_weights.h5',        #权重保存路径
        "saved_model_path":r'C:\Users\22377\Desktop\kangqueshizibianmaqi_model.h5',         #模型保存路径
        "train_epochs":100,                                                                #训练轮次
        "train_batch_size":32,                                                             #训练每轮次个数
        "disjz_path":r'C:\Users\22377\Desktop\disjz.txt'
    }
    
    result_1=main1(parameters_1)
    #  result_1:{
    # "base64ImgStr":image_base64_cata,
    #"selected_data":selected_datas
    # }
    result_2=main2(parameters_2,result_1)
    #  result_2:{
    #     "base64ImgStr":image_base64_traj, #xxxxx
    #     "traj_data":traj_data
    # }
    
    result_3=main3(parameters_3,result_1)
     #  reslt_3:{
    #     "base64ImgStr":image_base64_traj, #xxxxx
    #     "traj_data":traj_data
    #"cluster_traj_data":julei_datas ,                  #	list[string,string,list[[double,double]]] cluster_traj_data 路径、组名、航迹对应关系的列表
    #    "base64ImgPca":image_base64_index               #指标图
    # }


