from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import os,shutil
from warnings import simplefilter
simplefilter(action='ignore',category=FutureWarning)
from collections import Counter
from numpy.linalg import eig
import keras
import math
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot#,savefig
import io
from sklearn import preprocessing
import base64
import random
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
from sklearn.metrics import accuracy_score
from keras.callbacks import ReduceLROnPlateau, LambdaCallback
if gpus:
  try:
    # Cap the first GPU's memory so this process does not grab the whole card.
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*4)])  # limit GPU memory use to 4 GB
    logical_gpus = tf.config.experimental.list_logical_devices('GPU')
    print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
  except RuntimeError as e:
    # Virtual devices must be configured before the GPU is initialized; report and continue.
    print(e)
from PIL import Image, ImageDraw
tf.keras.backend.clear_session()  # drop any prior Keras graph/session state
from scipy.optimize import linear_sum_assignment
import re
import time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential, Model
from keras.wrappers.scikit_learn import KerasClassifier
from keras.utils import np_utils, plot_model
from keras.optimizers import Adam
from keras.layers import Dense, Dropout, Flatten, Conv2D, Conv1D, MaxPooling2D, MaxPooling1D, LeakyReLU, Reshape, BatchNormalization, CuDNNGRU, LSTM, Activation, UpSampling2D, UpSampling1D
import random as rd
import tensorflow as tf
from keras import backend as K
from PIL import Image
import os
import cv2
import matplotlib.image as mpimg
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import silhouette_score, adjusted_rand_score
from matplotlib.lines import Line2D
from keras.callbacks import Callback, LearningRateScheduler
import logging
# Basic logging configuration for the whole script.
logging.basicConfig(level=logging.DEBUG, 
                    format='%(asctime)s - %(levelname)s - %(message)s',
                    filename='app.log',  # write records to this file instead of stdout
                    filemode='a')       # 'a' appends across runs; 'w' would overwrite
def initcenter(x,k):
    """Pick k distinct random sample indices to use as initial cluster centers.

    x: sequence of samples (only its length is used).
    k: number of centers; must be <= len(x).
    Returns a list of k unique indices in [0, len(x)).
    """
    # Seed from the sub-second part of the wall clock so repeated calls differ.
    frac = math.modf(time.time())
    np.random.seed(int(frac[0]*1000))
    # BUG FIX: the original drew from range(len(x)-1), so the LAST sample could
    # never be chosen as a center. choice(..., replace=False) also guarantees
    # uniqueness without the original retry loop.
    return list(np.random.choice(len(x), size=k, replace=False))
def nearest(kc, i, k, disjz):
    """Return the position (0..k-1) of the center in kc closest to sample i.

    Ties resolve to the first (lowest-index) center, matching np.where order.
    """
    dists = [disjz[kc[c], i] for c in range(k)]
    winners = np.where(dists == np.min(dists))
    return winners[0][0]
def xclassify(x, y, kc, k, disjz):
    """Assign every sample in x to its nearest center; writes labels into y IN PLACE and returns y."""
    for idx, sample in enumerate(x):
        y[idx] = nearest(kc, sample, k, disjz)
    return y
def newcenten(m, ycen, disjz):
    """Return the medoid of cluster m: the member minimizing its summed distance
    to all other members. Falls back to ycen when the cluster is empty.

    m: result of np.where(...) — a 1-tuple holding the member-index array.
    """
    members = m[-1]  # unwrap the index array from the np.where tuple
    center = ycen
    best_cost = float('inf')
    for cand in members:
        cost = 0
        for other in members:
            cost = cost + disjz[cand, other]
        if cost < best_cost:
            best_cost = cost
            center = cand
    return center
def kcmean(x, y, kc, k, disjz):
    """Recompute each cluster's medoid from the current assignment y.

    Returns (centers, changed): the new center array and whether any center moved
    (changed == False signals convergence to the caller).
    """
    centers = list(kc)
    changed = False
    for c in range(k):
        members = np.where(y == c)
        medoid = newcenten(members, centers[c], disjz)
        if centers[c] != medoid:
            centers[c] = medoid
            changed = True
    return (np.array(centers), changed)
def pca(X,k):
    """Project X (n_samples x n_features) onto its top-k principal components.

    Returns an (n_samples, k) array of scores.
    BUG FIX: np.linalg.eig stores eigenvectors as COLUMNS; the original indexed
    ROWS (`eigenvectors[klarge_index]`), projecting onto the wrong axes. eigh is
    used instead of eig because a covariance matrix is symmetric, giving real,
    numerically stable eigenpairs.
    """
    X = X - X.mean(axis = 0)           # center the data
    X_cov = np.cov(X.T, ddof = 0)      # feature covariance matrix
    eigenvalues, eigenvectors = np.linalg.eigh(X_cov)
    klarge_index = eigenvalues.argsort()[-k:][::-1]   # k largest eigenvalues, descending
    k_eigenvectors = eigenvectors[:, klarge_index]    # select COLUMNS (one per component)
    return np.dot(X, k_eigenvectors)
def deifen(y, disjz):
    """Mean silhouette coefficient of labelling y over the precomputed distance
    matrix disjz. A sample alone in its cluster scores 0.
    """
    scores = []
    for i in range(len(disjz)):
        own = 0            # mean distance to the sample's own cluster
        others = []        # mean distances to every other cluster
        for lab in set(y):
            members = np.where(y == lab)[-1]
            total = 0
            for j in members:
                total = total + disjz[i, j]
            mean_dist = total / len(members)
            if (members == i).any():
                own = mean_dist
            else:
                others.append(mean_dist)
        if others == []:
            print(y)
        nearest_other = min(others)
        if own == 0:       # singleton cluster (distance to itself only)
            scores.append(0)
        else:
            scores.append((nearest_other - own) / max([own, nearest_other]))
    return np.mean(scores)

def julei(k,x,yi,disjz):             # k: number of clusters; x: sample indices; yi: label buffer (mutated in place)
    """K-medoids-style clustering over the precomputed distance matrix disjz.

    Runs 10 random restarts, scores each with the silhouette (deifen), and keeps
    the best. Each restart iterates assign (xclassify) / recenter (kcmean) until
    centers stop moving or 10 passes elapse.
    Returns (b, kcr): b is a list of k lists of member indices; kcr the final centers.
    NOTE(review): `y = yi` aliases the caller's array and xclassify mutates it in
    place, so labels leak across restarts — confirm this is intended.
    """
    a=-float('inf')
    for i in range(10):                           # 10 restarts; keep the best-scoring one
        kc=initcenter(x,k)
        y=yi
        flag=True
        count=1
        while flag:
            count=1+count
            y = xclassify(x,y,kc,k,disjz)                     # y now holds each sample's cluster label
            kc,flag = kcmean(x,y,kc,k,disjz)                 # stop when centers stabilize or after 10 passes
            print(flag)
            if count>10:
                flag=False
                print('z')
        if deifen(y,disjz)>a:
            yrr=y.copy()
            a=deifen(y,disjz)     
            kcr=kc
    # Regroup sample indices by their final label into k member lists.
    b=[]
    for j in range(k):
        a=[]
        for i in range(len(disjz)):
            if (yrr[i])==j:
                a.append(i)
        b.append(a)          
    return b,kcr
def find_optimal_k(data, k_range, parameters):
    """Choose K maximizing the silhouette score (method 1) and plot the curve.

    data is expected to be a precomputed distance matrix (metric='precomputed').
    """
    show_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    best_score = -1
    best_k = None
    scores = []
    for n_clusters in k_range:
        labels = KMeans(n_clusters=n_clusters, random_state=42).fit_predict(data)
        score = silhouette_score(data, labels, metric='precomputed')
        scores.append(score)
        if score > best_score:
            best_score = score
            best_k = n_clusters

    plt.plot(k_range, scores, 'bx-')
    plt.xlabel('Number of Clusters (K)')
    plt.ylabel('Silhouette Score')
    plt.title('Silhouette Score for Each K')
    if show_preview:
        plt.show()
    plt.clf()
    plt.close()
    return best_k
def find_optimal_k_elbow(data, k_range, parameters):
    """Choose K via the elbow method on KMeans inertia (method 2) and plot the curve.

    Returns None when fewer than 3 distortion values are available.
    """
    show_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    distortions = []
    for n_clusters in k_range:
        model = KMeans(n_clusters=n_clusters, random_state=42)
        model.fit(data)
        distortions.append(model.inertia_)

    # The "elbow" is taken as the K after the largest relative inertia drop.
    optimal_k = None
    if len(distortions) > 2:
        rates = [(distortions[i - 1] - distortions[i]) / distortions[i - 1]
                 for i in range(1, len(distortions))]
        optimal_k = k_range[rates.index(max(rates)) + 1]

    plt.plot(k_range, distortions, 'bx-')
    plt.xlabel('Number of Clusters (K)')
    plt.ylabel('Distortion')
    plt.title('Elbow Method for Optimal K')
    if show_preview:
        plt.show()
    plt.clf()
    plt.close()

    return optimal_k
def compute_gap_statistic(data, k_range, n_refs=3, parameters=None):
    """Choose K via the gap statistic (method 3) and plot gaps with error bars.

    Compares log-inertia on `data` against `n_refs` uniform reference datasets of
    the same shape; returns the first K satisfying the gap criterion, falling back
    to the last K in k_range.
    BUG FIX: the mutable default `parameters={}` is replaced by None (a shared
    default dict persists across calls).
    NOTE(review): callers must pass `parameters` by KEYWORD — a positional third
    argument is consumed by `n_refs`.
    """
    if parameters is None:
        parameters = {}
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    gaps = []
    sds = []

    for k in k_range:
        # Inertia of KMeans on the actual data.
        kmeans = KMeans(n_clusters=k, random_state=42)
        kmeans.fit(data)
        distortion = kmeans.inertia_

        # Inertias on uniform-random reference datasets.
        ref_dis = []
        for _ in range(n_refs):
            ref_data = np.random.rand(*data.shape)
            kmeans.fit(ref_data)
            ref_dis.append(kmeans.inertia_)

        ref_mean = np.mean(ref_dis)
        ref_sd = np.std(ref_dis)

        # Gap = expected log-inertia under the null minus observed log-inertia.
        gaps.append(np.log(ref_mean) - np.log(distortion))
        sds.append(ref_sd)

    # First K whose gap is within one reference sd of the next gap.
    optimal_k = None
    for i in range(1, len(k_range)):
        if gaps[i] >= gaps[i - 1] - sds[i]:
            optimal_k = k_range[i - 1]
            break
    if optimal_k is None:
        optimal_k = k_range[-1]

    # Plot the gap statistic with error bars.
    plt.plot(k_range, gaps, 'bx-')
    plt.errorbar(k_range, gaps, yerr=sds, fmt='o', color='r')
    plt.xlabel('Number of Clusters (K)')
    plt.ylabel('Gap Statistic')
    plt.title('Gap Statistic for Optimal K')
    if enable_debug_preview:
        plt.show()
    plt.clf()
    plt.close()

    return optimal_k

def demo(lst, k):
    """Rotate the sequence left by k positions (e.g. [1,2,3,4], 1 -> [2,3,4,1])."""
    head, tail = lst[:k], lst[k:]
    return tail + head
def sgn(x):
    """Piecewise ramp used to build a cyclic color channel on [0, 1].

    0 on [0, 1/3); rises 6x-2 on [1/3, 1/2); 1 on [1/2, 5/6); falls -6x+6 on
    [5/6, 1]. Returns None outside [0, 1].
    """
    if 0 <= x < 1/3:
        return 0
    if 1/2 <= x < 5/6:
        return 1
    if 1/3 <= x < 1/2:
        return 6*x - 2
    if 5/6 <= x <= 1:
        return -6*x + 6
def gai(x,q):
    """Zero out a random square patch of side int(q*108) in x (an image or a
    batch broadcastable against shape (108, 108, 3)), simulating occlusion.

    The patch origin is uniform over the image; each axis wraps around
    independently when the window runs off the edge.
    NOTE(review): when the window wraps in BOTH axes, only the two "diagonal"
    blocks are zeroed — the two cross blocks are left untouched; confirm whether
    that asymmetry is intended.
    """
    b=108
    indexz=rd.randint(0,b-1)   # random top-left row of the window
    indexz2=rd.randint(0,b-1)  # random top-left column of the window
    shape = np.ones([108, 108, 3])#*((1-bi1)/bi1)
    num=int(q*b)               # side length of the zeroed window

    if indexz+num>=b:
        if indexz2+num>=b:
            # wraps in both axes: zero bottom-right and top-left blocks only
            shape[indexz:b,indexz2:b,:]=0#0data_numpy
            shape[0:indexz+num-b,0:indexz2+num-b,:]=0       #0data_numpy    
        else:
            # wraps in rows only
            shape[indexz:b,indexz2:indexz2+num,:]=0     #0data_numpy
            shape[0:indexz+num-b,indexz2:indexz2+num,:]=0     #0data_numpy   
    else:
        if indexz2+num>=b:
            # wraps in columns only
            shape[indexz:indexz+num,indexz2:b,:]=0         #0data_numpy
            shape[indexz:indexz+num,0:indexz2+num-b,:]=0      #0data_numpy    
        else:
            # window fully inside the image
            shape[indexz:indexz+num,indexz2:indexz2+num,:]=0      #data_numpy
    x_=shape*x  # apply the mask multiplicatively (broadcasts over a batch)

    return x_
def draw_trajectory(trajectory, image_size=(512, 512)):
    """Render a trajectory as a white polyline on a black grayscale ('L') image.

    trajectory: iterable of points whose first two components are (lat, lon)
        (works for plain lists as well as numpy arrays, and tolerates extra columns).
    image_size: (width, height) in pixels.
    Returns a PIL.Image.
    """
    image = Image.new('L', image_size, 0)  # black background
    draw = ImageDraw.Draw(image)

    # Bounding box of the trajectory.
    lats = [point[0] for point in trajectory]
    lons = [point[1] for point in trajectory]
    lat_min, lat_max = min(lats), max(lats)
    lon_min, lon_max = min(lons), max(lons)
    # BUG FIX: guard against a degenerate (constant) axis, which previously
    # raised ZeroDivisionError.
    lat_span = (lat_max - lat_min) or 1.0
    lon_span = (lon_max - lon_min) or 1.0

    # Map (lat, lon) into pixel coordinates.
    def normalize(lat, lon):
        x = int((lon - lon_min) / lon_span * image_size[0])
        y = int((lat - lat_min) / lat_span * image_size[1])
        return x, y

    # Index the first two components explicitly instead of `trajectory[:,]`
    # unpacking, so non-numpy sequences also work.
    points = [normalize(point[0], point[1]) for point in trajectory]
    draw.line(points, fill=255, width=2)

    return image
def letterbox_image(image, size):
    """Resize a PIL image into `size` without distortion (letterboxing with black
    padding) and return it as a float32 array scaled to [0, 1]."""
    image = image.convert("RGB")
    src_w, src_h = image.size
    dst_w, dst_h = size
    scale = min(dst_w / src_w, dst_h / src_h)
    new_w = int(src_w * scale)
    new_h = int(src_h * scale)
    resized = image.resize((new_w, new_h), Image.BICUBIC)
    canvas = Image.new('RGB', size, (0, 0, 0))
    canvas.paste(resized, ((dst_w - new_w) // 2, (dst_h - new_h) // 2))
    return np.asarray(canvas).astype(np.float32) / 255
def autoencoder(parameters, result_1):
    """Train a convolutional autoencoder on rendered trajectory images, embed each
    trajectory with the bottleneck layer, build a pairwise distance matrix and
    cluster it.

    parameters: dict with "train_epochs", "train_batch_size",
        "saved_weights_path", "saved_model_path" (plus optional debug flags).
    result_1: dict with "selected_datas" ([path, label, data] triples) and "q"
        (occlusion ratio used for augmentation).
    Returns (a, disjz, best_k): cluster member-index lists, the pairwise distance
    matrix, and the chosen number of clusters.
    """
    datas = result_1["selected_datas"]
    lines = []
    q = result_1["q"]

    # Drop the first column of each trajectory, keeping the two plotted columns.
    for item in datas:
        line = np.delete(item[2], 0, axis=1)
        lines.append(line)

    # Render every trajectory to a letterboxed 108x108 image.
    size = (108, 108)
    X_test = []
    for trajectory in lines:
        img = draw_trajectory(trajectory)
        image2 = letterbox_image(img, size)
        X_test.append(image2)

    X_test = np.array(X_test)
    X_test = gai(X_test, q)  # random occlusion augmentation

    # Shuffled copy of the trajectories for training.
    from random import shuffle
    shuffle_index = np.arange(len(lines), dtype=np.int32)
    shuffle(shuffle_index)
    # BUG FIX: np.object was removed in NumPy 1.24; plain `object` is equivalent.
    train_lines = np.array(lines, dtype=object)
    train_lines = train_lines[shuffle_index]

    X_test2 = []
    for i in train_lines:
        img = draw_trajectory(i)
        image2 = letterbox_image(img, size)
        X_test2.append(image2)
    X_test2 = np.array(X_test2)

    def generatebatch(X, Y, batch_size):
        # Infinite generator yielding (occluded inputs, clean targets).
        while True:
            if X.shape[0] < batch_size or Y.shape[0] < batch_size:
                raise ValueError(f"X or Y has fewer rows ({X.shape[0]}, {Y.shape[0]}) than batch size ({batch_size}).")

            max_index = X.shape[0] // batch_size - 1
            if max_index < 0:
                raise ValueError(f"X has fewer rows ({X.shape[0]}) than batch size ({batch_size}).")

            batch_i = rd.randint(0, max_index)
            start = batch_i * batch_size
            end = start + batch_size

            batch_xs = []
            for i in range(batch_size):
                batch_xs.append(gai(X[start + i], q))  # fresh occlusion per sample
            batch_xs = np.array(batch_xs)
            batch_ys = Y[start:end]

            yield batch_xs, batch_ys

    import keras.backend as K
    from keras.callbacks import LearningRateScheduler

    def IoU(y_true, y_pred):
        # MSE with bright (drawn) pixels up-weighted by a factor of `a`.
        a = 5
        y_true = tf.where(y_true > 0.5, x=y_true * a, y=y_true)
        y_pred = tf.where(y_true > 0.5, x=y_pred * a, y=y_pred)

        return tf.reduce_mean(tf.square(y_pred - y_true), axis=-1)

    def scheduler(epoch):
        # Halve the learning rate every 100 epochs.
        if epoch % 100 == 0 and epoch != 0:
            lr = tf.keras.backend.get_value(autoencoder.optimizer.lr)
            tf.keras.backend.set_value(autoencoder.optimizer.lr, lr * 0.5)
            print("lr changed to {}".format(lr * 0.5))
        return tf.keras.backend.get_value(autoencoder.optimizer.lr)

    reduce_lr = LearningRateScheduler(scheduler)

    # Encoder: two conv+pool stages down to a 2-unit bottleneck ('d1').
    lrr = 0.01
    input_img = keras.Input(shape=(108, 108, 3))
    x = Dropout(0)(input_img)

    x = Conv2D(6, (3, 3), strides=(1, 1), activation='relu', padding='same', name='dd')(x)
    x = MaxPooling2D((3, 3), padding='same')(x)
    x = Conv2D(12, (3, 3), strides=(1, 1), activation='relu', padding='same')(x)
    x = MaxPooling2D((3, 3), padding='same')(x)
    encoded = Dense(2, name='d1')(x)
    # Decoder mirrors the encoder back up to a 108x108x3 sigmoid output.
    x = Dense(12, name='d3')(encoded)
    x = UpSampling2D((3, 3))(x)
    x = Conv2D(6, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((3, 3))(x)
    decoded = Conv2D(3, (3, 3), activation='sigmoid', padding='same', name='d2')(x)

    # Shadows the function name deliberately; the scheduler closure reads it.
    autoencoder = keras.Model(input_img, decoded)
    autoencoder.compile(Adam(lr=lrr), loss=IoU)
    autoencoder.summary()

    train_epochs = parameters["train_epochs"]
    train_batch_size = parameters["train_batch_size"]

    # Ground-truth labels (kept for reference/diagnostics).
    true_labels = np.array([item[1] for item in datas])

    log_callback = LambdaCallback(on_epoch_end=lambda epoch, logs: logging.info(f"已训练 {epoch + 1}/{train_epochs} 轮"))
    # Train the autoencoder on shuffled, freshly-occluded batches.
    autoencoder.fit_generator(
        generatebatch(X_test2, X_test2, train_batch_size),
        epochs=train_epochs,
        steps_per_epoch=len(X_test2) // train_batch_size,
        callbacks=[reduce_lr, log_callback]
    )

    # Embed every occluded image with the bottleneck layer.
    pre_model = Model(inputs=autoencoder.input, outputs=autoencoder.get_layer('d1').output)
    predict_result3 = pre_model.predict(X_test, batch_size=64)
    logging.info('保存模型权重...')
    saved_weights_path = parameters["saved_weights_path"]
    autoencoder.save_weights(saved_weights_path)
    logging.info('保存编码器模型...')
    saved_model_path = parameters["saved_model_path"]
    model = Model(inputs=autoencoder.input, outputs=autoencoder.get_layer('d1').output)
    model.save(saved_model_path)

    def getDistance(x, y):
        # Squared Euclidean distance between two embeddings.
        distance = np.sum((x - y) ** 2)
        return distance

    # Symmetric pairwise-distance matrix over the embeddings.
    disjz = -np.ones([len(X_test), len(X_test)])
    for i in range(len(X_test)):
        for j in range(i, len(X_test)):
            df1 = predict_result3[i]
            df2 = predict_result3[j]
            if disjz[i, j] == -1:
                disjz[i, j] = getDistance(df1, df2)
                disjz[j, i] = disjz[i, j]

    k_range = range(2, 13)
    # BUG FIX: `parameters` must be passed by keyword — positionally it was bound
    # to n_refs, and range(<dict>) raised TypeError inside compute_gap_statistic.
    best_k = compute_gap_statistic(disjz, k_range, parameters=parameters)
    logging.info('预测k值为：' + str(best_k))
    print("Optimal K value:", best_k)

    k = best_k
    if k == 0:
        # Fallback: density-based (DBSCAN-style) clustering on the distance matrix.
        R = disjz.max() * 0.01
        mintr = 4
        neibor = []
        for i in range(len(disjz)):
            linshi = []
            for j in range(len(disjz)):
                if disjz[i, j] < R:
                    linshi.append(j)
            neibor.append(linshi)
        visited = [0] * len(disjz)
        a = []
        for i in range(len(disjz)):
            if visited[i] != 1 and len(neibor[i]) > mintr:
                cu = []
                for j in neibor[i]:
                    if visited[j] != 1:
                        cu.append(j)
                        visited[j] = 1
                while True:
                    # BUG FIX: snapshot the list; `cu2 = cu` aliased it, making
                    # the convergence comparison always true.
                    cu2 = list(cu)
                    for ii in cu:
                        for jj in neibor[ii]:
                            if visited[jj] != 1:
                                cu.append(jj)
                                visited[jj] = 1
                    if cu2 == cu:
                        break
                a.append(cu)
    else:
        # Medoid-based clustering with k = best_k.
        k = best_k
        x = [i for i in range(len(disjz))]
        y = np.zeros(len(disjz))
        [a, kc] = julei(k, x, y, disjz)

    return a, disjz, best_k

def data_plot(parameters, result_1, a, width):
    """Plot clustered trajectories (one color per cluster) and return the figure
    as a base64-encoded PNG string."""
    show_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    num_clusters = len(a)
    datas = result_1["selected_datas"]
    paths = [entry[0] for entry in datas]

    # Flatten the cluster member lists into a per-sample label array.
    y = np.zeros(len(paths), dtype=int)
    for cluster_id, members in enumerate(a):
        for sample_idx in members:
            y[sample_idx] = cluster_id

    # Periodic color ramps for the red/green/blue channels.
    ramp = [sgn(v) for v in np.arange(0, 0.99, 0.01)]
    yan1 = demo(ramp, 33)  # red
    yan2 = demo(ramp, 66)  # green
    yan3 = ramp            # blue

    fig = plt.figure()
    ax = fig.gca()
    fig.set_size_inches(10, 6)

    legend_elements = []
    for c in range(num_clusters):
        members = np.where(y == c)
        pos = math.floor(c / (num_clusters + 1) * 99)
        cc = (yan1[pos], yan2[pos], yan3[pos])

        for i in members[0]:
            dfc = datas[i][2][0:300, 0:width]
            ax.plot(dfc[:, 1], dfc[:, 2], linewidth=0.5, color=cc, alpha=0.8)

        legend_elements.append(Line2D([0], [0], color=cc, lw=4, label=f'Cluster {c}'))

    ax.legend(handles=legend_elements)

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    if show_preview:
        plt.show()
    image_base64_julei = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_julei
def origin_data_plot(parameters, selected_datas, width):
    """Plot trajectories colored by their ground-truth label and return the figure
    as a base64-encoded PNG string."""
    show_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    datas = selected_datas

    # One color slot per distinct ground-truth label.
    unique_labels = list(set(item[1] for item in datas))
    k = len(unique_labels)
    label_to_index = {label: idx for idx, label in enumerate(unique_labels)}
    y = np.array([label_to_index[item[1]] for item in datas], dtype=int)

    # Periodic color ramps for the red/green/blue channels.
    ramp = [sgn(v) for v in np.arange(0, 0.99, 0.01)]
    yan1 = demo(ramp, 33)  # red
    yan2 = demo(ramp, 66)  # green
    yan3 = ramp            # blue

    fig = plt.figure()
    ax = fig.gca()
    fig.set_size_inches(10, 6)

    legend_elements = []
    for c in range(k):
        members = np.where(y == c)
        pos = math.floor(c / (k + 1) * 99)
        cc = (yan1[pos], yan2[pos], yan3[pos])

        for i in members[0]:
            dfc = datas[i][2][:, :width]  # trajectory columns up to `width`
            ax.plot(dfc[:, 1], dfc[:, 2], linewidth=0.5, color=cc, alpha=0.8)

        legend_elements.append(Line2D([0], [0], color=cc, lw=4, label=f'Label {unique_labels[c]}'))

    ax.legend(handles=legend_elements)

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    if show_preview:
        plt.show()
    image_base64_origin = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_origin

def data_save(f_dir, f_dir1, a):
    """Copy the files of f_dir into per-cluster subfolders of f_dir1.

    a: list of clusters, each a list of indices into os.listdir(f_dir).
    Files not referenced by `a` fall into cluster 0 (label array defaults to 0).
    """
    file_name = os.listdir(f_dir)
    k = len(a)
    y = np.zeros(len(file_name))
    if not os.path.exists(f_dir1):
        os.mkdir(f_dir1)
    for cluster_id, members in enumerate(a):
        for idx in members:
            y[idx] = cluster_id
    for c in range(k):
        members = np.where(y == c)[-1]
        target = f_dir1 + '//' + str(c)
        if not os.path.exists(target):
            os.mkdir(target)
        for i in members:
            shutil.copy(f_dir + '//' + file_name[i], target)
def extract_label(path):
    """Extract the flight label from a Variflight .npy path.

    e.g. '.../Variflight_CCA1919_20230101.xls1.npy' -> 'CCA1919'.
    Raises ValueError when the path does not match the expected pattern.
    """
    match = re.search(r'Variflight_(\w+)_\d{8}\.xls\d+\.npy$', path)
    if match is None:
        raise ValueError(f"无法从路径 {path} 中提取标签")
    return match.group(1)

def load_data(parameters):
    """Load each .npy path in parameters["paths"] into a [path, label, data]
    triple, truncating data to parameters["length"] rows.

    Paths whose name cannot be labelled or whose data is not (n, 3)-shaped are
    reported via print and skipped.
    """
    paths = parameters["paths"]
    length = parameters["length"]
    result = []

    for path in paths:
        try:
            label = extract_label(path)
            data = np.load(path)[0:length, :]
            if data.ndim != 2 or data.shape[1] != 3:
                raise ValueError(f"Data shape {data.shape} is not valid. Expected a 2D array with 3 features per point.")
            result.append([path, label, data])
        except ValueError as e:
            print(e)
    return result
def select_data(parameters, origin_datas):
    """Take up to parameters[label] samples for each known flight label from
    origin_datas, preserving input order; other labels are dropped."""
    flight_labels = (
        "CCA1919", "CES2360", "CES2551",
        "CHH7303", "CHH7426", "CHH7695",
        "CSN3371", "CSN6568", "CSN6761",
    )
    # Remaining quota per label, initialized from the parameters dict.
    remaining_counts = {name: parameters[name] for name in flight_labels}

    selected_datas = []
    for path, label, data in origin_datas:
        if remaining_counts.get(label, 0) > 0:
            selected_datas.append([path, label, data])
            remaining_counts[label] -= 1

    return selected_datas
def generate_missing_data(selected_datas, q):
    """Zero a random contiguous fraction q of every trajectory, then drop the
    zeroed rows (rows whose lat or lon is 0). Trajectories left empty after
    filtering are discarded.
    """
    def _mask(data, num_points, q):
        # Zero a window of int(q*num_points) consecutive rows.
        num_masked = int(q * num_points)
        start = rd.randint(0, num_points - 1)
        end = start + num_masked
        if end >= num_points:
            # Window would run off the end — shift it back so it fits.
            start = num_points - num_masked
            end = num_points
        out = data.copy()
        out[start:end, :] = 0
        return out

    selected_lost_datas = []
    for path, label, data in selected_datas:
        masked = _mask(data, len(data), q)
        # Remove points whose latitude or longitude were zeroed.
        kept = masked[(masked[:, 1] != 0) & (masked[:, 2] != 0)]
        if kept.size > 0:
            selected_lost_datas.append([path, label, kept])

    return selected_lost_datas

def data_julei(result_1, a):
    """Reload each clustered trajectory from disk and tag it with its cluster id.

    Returns a list of [path, cluster_id, data(:, :3)] entries, grouped by cluster.
    """
    source = result_1["selected_datas"]
    paths = [entry[0] for entry in source]
    width = 3
    datas = []
    k = len(a)

    # Per-sample label array from the cluster member lists.
    y = np.zeros(len(paths))
    for cluster_id, members in enumerate(a):
        for idx in members:
            y[idx] = cluster_id

    for c in range(k):
        members = np.where(y == c)[-1]
        for i in members:
            df = np.load(paths[i])[:, 0:width]
            datas.append([paths[i], c, df])
    return datas
def plt_category_number(parameters, selected_datas):
    """Bar-chart the number of samples per ground-truth label and return the
    chart as a base64-encoded PNG string."""
    show_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)

    # Count samples per label.
    label_counts = {}
    for entry in selected_datas:
        label_counts[entry[1]] = label_counts.get(entry[1], 0) + 1

    labels = list(label_counts.keys())
    counts = list(label_counts.values())

    plt.bar(labels, counts, tick_label=labels)
    plt.xlabel('Category')
    plt.ylabel('Number of Data Points')
    plt.title('Data Distribution by Category')

    # Annotate each bar with its count.
    for pos, count in enumerate(counts):
        plt.text(pos, count, str(count), ha='center', va='bottom')
    if show_preview:
        plt.show()
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    image_base64_cata = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()
    return image_base64_cata
#    计算每类的纯度，并将结果存储在一个字典中。
# 定义函数
def map_clusters_to_labels(true_labels, cluster_labels):
    """Map each cluster id to the majority true label among its members.

    Returns (mapped_labels, cluster_to_label): cluster_labels re-expressed as
    true labels, and the cluster->label mapping (empty clusters map to -1).
    """
    cluster_to_label = {}
    for cluster_id in np.unique(cluster_labels):
        members = true_labels[cluster_labels == cluster_id]
        if len(members) > 0:
            cluster_to_label[cluster_id] = Counter(members).most_common(1)[0][0]
        else:
            cluster_to_label[cluster_id] = -1  # empty cluster

    mapped_labels = np.array([cluster_to_label.get(c, -1) for c in cluster_labels])
    return mapped_labels, cluster_to_label

def calculate_class_accuracy(true_labels, predicted_labels):
    """Per-class recall: for each true class, the fraction of its samples whose
    prediction equals that class. Returns {class: accuracy}."""
    class_accuracies = {}
    for label in np.unique(true_labels):
        indices = np.where(true_labels == label)[0]
        preds = predicted_labels[indices]
        hits = np.sum(preds == label)
        total = len(preds)
        class_accuracies[label] = hits / total if total > 0 else 0
    return class_accuracies

def calculate_class_purity(true_labels, predicted_labels):
    """Purity of each predicted cluster: the share of its most common true label.

    Returns {predicted_label: purity in (0, 1]}.
    """
    purity_dict = {}
    for cluster in np.unique(predicted_labels):
        member_idx = np.where(predicted_labels == cluster)[0]
        counts = Counter(true_labels[member_idx])
        _, top_count = counts.most_common(1)[0]
        purity_dict[cluster] = top_count / len(member_idx)
    return purity_dict

def average_dicts(dict_list):
    """Average values key-wise across a list of dicts.

    Each key is averaged over only the dicts that contain it. Returns {} for an
    empty list (the original raised IndexError). The redundant double
    initialization (fromkeys + in-loop check) is collapsed into one pass.
    """
    if not dict_list:
        return {}

    sum_dict = {}
    count_dict = {}
    for d in dict_list:
        for key, value in d.items():
            sum_dict[key] = sum_dict.get(key, 0) + value
            count_dict[key] = count_dict.get(key, 0) + 1

    return {key: sum_dict[key] / count_dict[key] for key in sum_dict}

def add_average_to_dict(data_dict, weight):
    """Weighted sum over the keys shared by data_dict and weight; the result is
    stored back into data_dict under "average" (MUTATES data_dict).

    Returns (data_dict, weighted_sum).
    """
    total = 0
    for key, value in data_dict.items():
        if key in weight:
            total += value * weight[key]

    data_dict["average"] = total
    return data_dict, total

def plot_dual_bars(f1_scores, accuracies, true_labels, parameters, title='聚类性能'):
    """Draw paired purity/accuracy bars per label and return the chart as a
    base64-encoded PNG string.

    f1_scores / accuracies: dicts keyed by label. MUTATED: any label present in
        true_labels but missing from the dicts is inserted with value 0.
    true_labels: iterable of ground-truth labels used to detect missing categories.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)

    # Labels already present in the score dict.
    labels = list(f1_scores.keys())

    set1 = set(true_labels)
    set2 = set(labels)

    # Labels that appear in the ground truth but have no score yet.
    missing_elements = set1 - set2
    list_missing_elements = list(missing_elements)
    # Give each missing label a zero score.
    result_dict = {key: 0 for key in list_missing_elements}

    labels = list_missing_elements + labels

    # Merge the zero entries into both dicts (mutates the caller's dicts).
    f1_scores.update(result_dict)
    accuracies.update(result_dict)

    f1_values = [f1_scores[label] for label in labels]
    accuracy_values = [accuracies[label] for label in labels]

    # Bar positions and width.
    x = np.arange(len(labels))  # index position of each category
    width = 0.35  # bar width

    # Create the figure and axes.
    fig, ax = plt.subplots()

    # Purity bars (left of each pair).
    rects1 = ax.bar(x - width/2, f1_values, width, label='纯度', color='black')
    # Accuracy bars (right of each pair).
    rects2 = ax.bar(x + width/2, accuracy_values, width, label='准确率', color='white', edgecolor='black')

    # Axis labels and title.
    ax.set_ylabel('性能')
    ax.set_title(title)
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend()

    # Rotate the x labels to avoid overlap.
    plt.xticks(rotation=45)

    # Fix the y-axis range so annotations fit.
    ax.set_ylim(0, 1.2)

    # Annotate each bar with its height, two decimal places.
    def autolabel(rects, ax):
        """Attach a text label above each bar in *rects*, displaying its height with two decimal places."""
        for rect in rects:
            height = rect.get_height()
            ax.annotate(f'{height:.2f}',  # two decimal places
                        xy=(rect.get_x() + rect.get_width() / 2, height),
                        xytext=(0, 3),  # 3 points vertical offset
                        textcoords="offset points",
                        ha='center', va='bottom', fontsize=8)

    autolabel(rects1, ax)
    autolabel(rects2, ax)

    # Show the figure only in debug-preview mode.
    plt.tight_layout()
    if enable_debug_preview:
        plt.show()

    # Serialize the figure to an in-memory PNG.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)

    # Base64-encode the PNG bytes.
    image_base64 = base64.b64encode(buf.getvalue()).decode('utf-8')

    plt.close()
    return image_base64
def save_clusters_to_files(out_path, julei_datas):
    """Save clustered trajectories under out_path/cluster_<label>/<file name>.

    out_path: output folder (created if missing).
    julei_datas: iterable of (path, label, data) triples; data is np.save-d under
        the original base file name inside its cluster folder.
    """
    os.makedirs(out_path, exist_ok=True)

    # Group the (file name, data) pairs by cluster label.
    cluster_dict = {}
    for path, label, data in julei_datas:
        cluster_dict.setdefault(label, []).append((os.path.basename(path), data))

    # Write each cluster into its own folder.
    for label, items in cluster_dict.items():
        cluster_folder = os.path.join(out_path, f'cluster_{label}')
        os.makedirs(cluster_folder, exist_ok=True)
        for file_name, data in items:
            np.save(os.path.join(cluster_folder, file_name), data)

    print(f"聚类结果已保存到 {out_path}")
def CancelTrain():
    """Request cancellation of the running training loop.

    Sets the module-level flag polled by the CheckCancel callback.
    The ``global`` statement is required: without it the assignment
    only created a function-local variable and the flag was never set,
    so cancellation silently never worked.
    """
    global __CANCEL_WAITHANDLE__
    __CANCEL_WAITHANDLE__ = True
def reset_canceltrain():
    """Clear the training-cancellation flag before a new training run.

    The ``global`` statement is required: without it the assignment
    only created a function-local variable and the module-level flag
    was never reset.
    """
    global __CANCEL_WAITHANDLE__
    __CANCEL_WAITHANDLE__ = False
# Custom Keras callback for cooperative training cancellation.
class CheckCancel(Callback):
    """Stops training as soon as the module-level cancel flag is raised.

    Polled once per batch; when ``__CANCEL_WAITHANDLE__`` is True the
    callback asks Keras to stop at the end of the current batch.
    """

    def on_batch_end(self, batch, logs=None):
        global __CANCEL_WAITHANDLE__
        if not __CANCEL_WAITHANDLE__:
            return
        self.model.stop_training = True
        print("Training stopped due to __CANCEL_WAITHANDLE__ being True")


# Module-level cancellation flag: set by CancelTrain(), cleared by
# reset_canceltrain(), and polled each batch by the CheckCancel callback.
__CANCEL_WAITHANDLE__=False







def main1(parameters):
    """Stage 1: load raw trajectories, subsample them, inject missing
    points at rate ``q`` and render the degraded tracks.

    :param parameters: config dict holding "paths", "length", "q",
        per-class sample counts and the debug-preview flag.
    :return: dict with the base64-encoded plot, the degraded data and q.
    """
    logging.info('导入原始数据...')
    raw = load_data(parameters)
    chosen = select_data(parameters, raw)
    logging.info('导入原始数据成功')

    q = parameters["q"]
    logging.info('生成缺失率为' + str(q) + '的缺失数据...')
    degraded = generate_missing_data(chosen, q)
    logging.info('缺失数据生成')

    logging.info('缺失航迹图生成...')
    plot_b64 = origin_data_plot(parameters, degraded, 3)

    result = {
        "base64ImgStr": plot_b64,
        "selected_datas": degraded,
        "q": q,
    }
    return result
    

def main2(parameters,result_1):
    """Stage 2: cluster the degraded trajectories and evaluate the result.

    Trains the autoencoder-based clustering, saves the distance matrix and
    the per-cluster trajectories, maps cluster ids onto ground-truth labels
    and computes per-class / weighted-average purity and accuracy.

    :param parameters: config dict ("out_path", "disjz_path", training
        settings, "__ENABLE_DEBUG_PREVIEW__", ...)
    :param result_1: output dict of main1 (must hold "selected_datas")
    :return: dict with the cluster plot, clustered trajectories, metrics
        image, weighted averages, elapsed time and the chosen cluster count.
    """
    plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei renders CJK labels
    plt.rcParams['axes.unicode_minus'] = False    # render minus sign correctly

    loop_num = 1
    # Accumulators must stay OUTSIDE the loop; the previous version reset
    # them inside every iteration, which would discard all but the last
    # run's metrics whenever loop_num > 1.
    class_accuracies_list = []
    class_purity_list = []
    out_path = parameters["out_path"]

    for _ in range(loop_num):
        reset_canceltrain()
        logging.info('开始聚类...')
        start_time = time.time()
        a, X, best_k = autoencoder(parameters, result_1)
        end_time = time.time()
        elapsed_time = end_time - start_time
        minutes = int(elapsed_time // 60)
        seconds = int(elapsed_time % 60)
        logging.info(f"训练完成。总耗时: {minutes} 分钟 {seconds} 秒")

        logging.info('生成聚类航迹图...')
        image_base64_julei = data_plot(parameters, result_1, a, 3)

        logging.info('保存距离矩阵...')
        disjz_path = parameters["disjz_path"]
        np.savetxt(disjz_path, X, fmt='%.6f', delimiter='\t')

        julei_datas = data_julei(result_1, a)
        origin_datas = result_1["selected_datas"]

        # Ground-truth labels vs. raw cluster assignments (item[1] holds the
        # label in both lists).
        true_labels = np.array([item[1] for item in origin_datas])
        cluster_labels = np.array([item[1] for item in julei_datas])

        logging.info('保存结果...')
        save_clusters_to_files(out_path, julei_datas)

        # Map cluster ids onto the true label set before scoring.
        mapped_labels, cluster_to_label = map_clusters_to_labels(true_labels, cluster_labels)
        logging.info(f"标签映射: {cluster_to_label}")

        logging.info('计算纯度和准确率...')
        class_accuracies = calculate_class_accuracy(true_labels, mapped_labels)
        class_purities = calculate_class_purity(true_labels, mapped_labels)
        class_accuracies_list.append(class_accuracies)
        class_purity_list.append(class_purities)

    # Per-class averages over all runs, then a frequency-weighted overall mean.
    average_class_accuracies = average_dicts(class_accuracies_list)
    average_class_purity = average_dicts(class_purity_list)
    element_counts = Counter(true_labels)
    data_len = len(true_labels)
    new_element_counts = {key: value / data_len for key, value in element_counts.items()}

    average_class_accuracies, average_accuracies = add_average_to_dict(average_class_accuracies, new_element_counts)
    average_class_purity, average_purity = add_average_to_dict(average_class_purity, new_element_counts)

    logging.info('生成指标...')
    # Pass the caller's parameters through unchanged; the previous version
    # rebound `parameters` to {"__ENABLE_DEBUG_PREVIEW__": True} here, which
    # discarded the configured settings and hard-coded the preview flag on.
    image_base64_index = plot_dual_bars(average_class_purity, average_class_accuracies, true_labels, parameters, title='聚类性能')
    for label in average_class_accuracies.keys():
        logging.info(f"{label}: 准确率: {average_class_accuracies[label]:.4f}, 纯度: {average_class_purity[label]:.4f}")

    logging.info(f"平均准确率: {average_accuracies:.4f}, 平均纯度: {average_purity:.4f}")
    print("Average Class Accuracies:", average_class_accuracies)
    print("Average Accuracy:", average_accuracies)
    print("Average Class Purity:", average_class_purity)
    print("Average Purity:", average_purity)

    return {
        "base64Imgcluster":image_base64_julei,              # multi-track cluster plot (base64 PNG)
        "cluster_traj_data":julei_datas ,                   # list of (path, group, trajectory) tuples
        "base64ImgIdx":image_base64_index ,                 # purity/accuracy bar chart (base64 PNG)
        'average_accuracies': average_accuracies,           # overall weighted accuracy
        'average_purity': average_purity,                   # overall weighted purity
        'time': elapsed_time,                               # wall-clock run time (seconds)
        'best_k':best_k,                                    # chosen number of clusters
    }



if __name__=="__main__":
    root = r"C:\Users\22377\Desktop\聚类\预处理"
    # One sub-folder per trajectory group under the preprocessing root.
    paths = [os.path.join(root, entry) for entry in os.listdir(root)]

    # Stage-1 configuration: data import, sample selection, missing-data
    # generation.
    parameters_1 = {
        "paths": paths,                     # list of absolute folder paths
        "length": 300,                      # float, data length, at most 600
        "__ENABLE_DEBUG_PREVIEW__": True,   # allow internal debug output (plots, UI; internal use)
        "q": 0.2,                           # float, missing-data rate

        # Per-class sample counts (at most 50 each).
        "CCA1919": 40,
        "CES2360": 40,
        "CES2551": 40,
        "CHH7303": 40,
        "CHH7426": 40,
        "CHH7695": 40,
        "CSN3371": 40,
        "CSN6568": 40,
        "CSN6761": 40,
    }

    # Stage-2 configuration: training and output locations.
    parameters_2 = {
        "__ENABLE_DEBUG_PREVIEW__": True,
        "saved_weights_path": r'C:\Users\22377\Desktop\kangqueshizibianmaqi_weights.h5',  # weights file
        "saved_model_path": r'C:\Users\22377\Desktop\kangqueshizibianmaqi_model.h5',      # model file
        "train_epochs": 100,                # training epochs
        "train_batch_size": 32,             # samples per batch
        "disjz_path": r'C:\Users\22377\Desktop\disjz.txt',
        "out_path": r'C:\Users\22377\Desktop\抗缺失聚类结果'
    }

    # result_1 keys: "base64ImgStr", "selected_datas", "q"
    result_1 = main1(parameters_1)

    # result_2 keys: "base64Imgcluster" (cluster plot), "cluster_traj_data"
    # (path/group/trajectory list), "base64ImgIdx" (metrics chart),
    # "average_accuracies", "average_purity", "time", "best_k"
    result_2 = main2(parameters_2, result_1)
