import os,shutil
from warnings import simplefilter
simplefilter(action='ignore',category=FutureWarning)
import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot#,savefig
import time
import keras  
from keras.models import Model#,Sequential
from keras.optimizers import Adam
from keras.layers import Dense,GRU
from sklearn import preprocessing
import tensorflow as tf
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, adjusted_rand_score,silhouette_samples
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from keras.callbacks import Callback, LearningRateScheduler
# Cap this process's GPU memory so other jobs can share the device.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
  try:
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*4)])  # limit GPU memory use to 4 GB
    logical_gpus = tf.config.experimental.list_logical_devices('GPU')
    print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
  except RuntimeError as e:
    # Report configuration failures (e.g. GPUs already initialized) without aborting.
    print(e)

tf.keras.backend.clear_session()  # clear any existing Keras session state
import keras.backend as K
from keras.callbacks import LearningRateScheduler
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
from numpy.linalg import eig
from tempo import *
from tempo0 import *  # 含有setParameters等函数
import io
import base64
import re
from matplotlib.lines import Line2D
def initcenter(x,k):
    """Pick k distinct random indices into x to serve as initial cluster centers.

    Parameters
    ----------
    x : sequence of samples (only its length is used).
    k : number of centers to draw; must satisfy k <= len(x).

    Returns
    -------
    list of k distinct indices in [0, len(x)).

    Raises
    ------
    ValueError if k exceeds the number of samples.
    """
    # Seed from the fractional part of the wall clock so repeated runs differ.
    b = math.modf(time.time())
    np.random.seed(int(b[0]*1000))
    if k > len(x):
        # The original rejection loop spun forever in this case.
        raise ValueError("k must not exceed the number of samples")
    # Bug fix: the original drew from range(len(x)-1), so the LAST sample could
    # never be chosen as a center. choice(..., replace=False) guarantees k
    # distinct indices over the full range in one call.
    return list(np.random.choice(len(x), size=k, replace=False))

def nearest(kc, i, k, disjz):
    """Return the index (0..k-1) of the center in kc closest to sample i.

    Distances come from the precomputed matrix disjz; ties resolve to the
    lowest center index (same as the original where/min scan).
    """
    dists = [disjz[kc[c], i] for c in range(k)]
    return np.argmin(np.array(dists))

def xclassify(x, y, kc, k, disjz):
    """Assign every sample in x to its nearest center.

    Writes the cluster label of each sample into y in place and returns y.
    """
    for pos, sample in enumerate(x):
        y[pos] = nearest(kc, sample, k, disjz)
    return y

def newcenten(m, ycen, disjz):
    """Return the medoid of one cluster.

    m is the tuple produced by np.where (its last element is the member-index
    array); the medoid is the member minimizing the sum of distances to all
    other members. For an empty cluster the previous center ycen is kept.
    """
    members = m[-1]            # index array out of the np.where tuple
    best = ycen
    best_sum = float('inf')
    for cand in members:
        total = 0
        for other in members:
            total = total + disjz[cand, other]
        if best_sum > total:
            best_sum = total
            best = cand
    return best

def kcmean(x, y, kc, k, disjz):
    """Recompute each cluster's medoid.

    Returns (new_centers, changed): new_centers is an ndarray of k medoid
    indices, changed is True iff at least one center moved this pass.
    """
    centers = list(kc)
    changed = False
    for c in range(k):
        members = np.where(y == c)
        medoid = newcenten(members, centers[c], disjz)
        if centers[c] != medoid:
            centers[c] = medoid
            changed = True
    return (np.array(centers), changed)
  
def deifen(y, disjz):
    """Score a labelling with a silhouette-style coefficient.

    For each sample: a = mean distance to its own cluster (self included),
    b = smallest mean distance to any other cluster; the sample scores
    (b - a) / max(a, b), or 0 when a == 0 (singleton cluster). Returns the
    mean score over all samples.
    """
    labels = set(y)            # loop-invariant: hoisted out of the sample loop
    scores = []
    for i in range(len(disjz)):
        own = 0
        others = []
        for lab in labels:
            members = np.where(y == lab)[-1]
            total = 0.0
            for j in members:
                total = total + disjz[i, j]
            avg = total / len(members)
            if (members == i).any():
                own = avg          # sample i belongs to this cluster
            else:
                others.append(avg)
        if others == []:
            print(y)
        nearest_other = min(others)
        if own == 0:
            scores.append(0)
        else:
            scores.append((nearest_other - own) / max([own, nearest_other]))
    return np.mean(scores)

def julei(k, x, yi, disjz):
    """K-medoids clustering with ten random restarts.

    Parameters
    ----------
    k : number of clusters.
    x : list of sample indices (0..n-1), each addressing a row of disjz.
    yi : initial label array of length n; mutated in place across restarts
         (xclassify writes into it), matching the original behaviour.
    disjz : precomputed pairwise distance matrix.

    Returns
    -------
    (b, kcr): b is a list of k lists of sample indices (one list per
    cluster); kcr holds the corresponding medoid indices.
    """
    best_score = -float('inf')
    yrr = None
    kcr = None
    # Ten restarts from random centers; keep the best-scoring labelling.
    for _ in range(10):
        kc = initcenter(x, k)
        y = yi
        changed = True
        rounds = 1
        while changed:
            rounds = rounds + 1
            y = xclassify(x, y, kc, k, disjz)          # assign labels
            kc, changed = kcmean(x, y, kc, k, disjz)   # move medoids
            if rounds > 10:
                changed = False    # hard cap on iterations per restart
        # Fix: the original evaluated deifen twice per restart; compute once.
        score = deifen(y, disjz)
        if score > best_score:
            best_score = score
            yrr = y.copy()
            kcr = kc
    if yrr is None:
        # Degenerate case (e.g. NaN scores): fall back to the last run
        # instead of raising NameError as the original did.
        yrr, kcr = y.copy(), kc
    # Group sample indices by their final cluster label.
    b = []
    for c in range(k):
        b.append([i for i in range(len(disjz)) if yrr[i] == c])
    return b, kcr


def demo(lst, k):
    """Rotate lst left by k positions and return the rotated list."""
    rotated = list(lst[k:])
    rotated.extend(lst[:k])
    return rotated
def sgn(x):
    """Trapezoid profile on [0, 1] used to build the RGB colour ramps.

    0 on [0, 1/3); rises linearly (6x - 2) on [1/3, 1/2); 1 on [1/2, 5/6);
    falls linearly (-6x + 6) on [5/6, 1]. Returns None outside [0, 1],
    exactly as the original branch ordering did.
    """
    if 0 <= x < 1/3:
        return 0
    if 1/3 <= x < 1/2:
        return 6*x - 2
    if 1/2 <= x < 5/6:
        return 1
    if 5/6 <= x <= 1:
        return -6*x + 6
def guiyi(disjz):
    """Min-max normalize the whole matrix to [0, 1].

    The matrix is flattened to one column so a single global min/max is used,
    then reshaped back to its original (rows, cols) shape.
    """
    rows, cols = disjz.shape
    flat = disjz.reshape([rows*cols, 1])
    scaler = preprocessing.MinMaxScaler()
    flat = scaler.fit_transform(flat)
    return flat.reshape([rows, cols])
def pca(X, k):
    """Project X onto its top-k principal components.

    Parameters
    ----------
    X : (n_samples, n_features) array.
    k : number of components to keep.

    Returns
    -------
    (n_samples, k) array: the centered data projected onto the k
    largest-variance axes.
    """
    X = X - X.mean(axis=0)            # center each feature
    X_cov = np.cov(X.T, ddof=0)       # feature covariance matrix
    eigenvalues, eigenvectors = eig(X_cov)
    # Indices of the k largest eigenvalues, in decreasing order.
    klarge_index = eigenvalues.argsort()[-k:][::-1]
    # Bug fix: np.linalg.eig returns eigenvectors as COLUMNS of the output
    # matrix. The original indexed ROWS (eigenvectors[klarge_index]) and then
    # transposed, projecting onto the wrong axes whenever the eigenvector
    # matrix was not symmetric.
    k_eigenvectors = eigenvectors[:, klarge_index]
    return np.dot(X, k_eigenvectors)


def find_optimal_k(data, k_range, parameters):
    """Scan k_range with KMeans, score each k by silhouette, plot the curve,
    and return the k with the highest silhouette score.

    NOTE(review): KMeans is fit on `data` as if it were a feature matrix,
    while silhouette_score is told the same matrix is precomputed distances —
    these two interpretations are inconsistent; confirm which is intended.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    best_score = -1
    best_k = None
    scores = []
    for k in k_range:
        model = KMeans(n_clusters=k, random_state=42)
        labels = model.fit_predict(data)
        score = silhouette_score(data, labels, metric='precomputed')
        scores.append(score)
        if score > best_score:
            best_score = score
            best_k = k

    plt.plot(k_range, scores, 'bx-')
    plt.xlabel('Number of Clusters (K)')
    plt.ylabel('Silhouette Score')
    plt.title('Silhouette Score for Each K')
    if enable_debug_preview:
        plt.show()
    plt.clf()
    plt.close()
    return best_k
def data_plot(parameters, a, L, width):
    """Plot every trajectory coloured by its cluster; return a base64 PNG.

    parameters: dict with "paths" (list of .npy files) and the optional
        "__ENABLE_DEBUG_PREVIEW__" flag.
    a: list of clusters, each a list of indices into paths.
    L, width: rows / columns to read from each trajectory file.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    paths = parameters["paths"]
    n_clusters = len(a)

    # Invert the cluster lists into a flat label array: labels[i] = cluster of path i.
    labels = np.zeros(len(paths), dtype=int)
    for c, members in enumerate(a):
        for idx in members:
            labels[idx] = c

    # Three phase-shifted copies of the trapezoid ramp act as R/G/B channels.
    ramp = [sgn(t) for t in np.arange(0, 0.99, 0.01)]
    red = demo(ramp, 33)
    green = demo(ramp, 66)
    blue = ramp

    fig = plt.figure()
    ax = fig.gca()
    fig.set_size_inches(10, 6)

    legend_elements = []
    for c in range(n_clusters):
        members = np.where(labels == c)
        pos = math.floor(c / (n_clusters + 1) * 99)
        colour = (red[pos], green[pos], blue[pos])
        for idx in members[0]:
            traj = np.load(paths[idx])[0:L, 0:width]
            # Columns 1 and 2 are plotted against each other (lat/lon axes).
            ax.plot(traj[:, 1], traj[:, 2], linewidth=0.5, color=colour, alpha=0.8)
        legend_elements.append(Line2D([0], [0], color=colour, lw=4, label=f'Cluster {c}'))

    ax.legend(handles=legend_elements)

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    if enable_debug_preview:
        plt.show()
    image_base64_julei = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_julei

    
def pca_plot(parameters, X, a):
    """Scatter-plot the samples in 2-D PCA space, coloured by cluster.

    Returns the figure as a base64-encoded PNG string.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)
    paths = parameters["paths"]
    n_clusters = len(a)

    # labels[i] = cluster of sample i (inverted from the cluster lists).
    labels = np.zeros(len(paths))
    for c, members in enumerate(a):
        for idx in members:
            labels[idx] = c

    # Three phase-shifted copies of the trapezoid ramp act as R/G/B channels.
    ramp = [sgn(t) for t in np.arange(0, 0.99, 0.01)]
    red = demo(ramp, 33)
    green = demo(ramp, 66)
    blue = ramp

    X_pca = pca(X, 2)
    fig = plt.figure()
    fig.set_size_inches(10, 6)
    for c in range(n_clusters):
        members = np.where(labels == c)[-1]
        pos = math.floor(c / (n_clusters + 1) * 99)
        colour = (red[pos], green[pos], blue[pos])
        for idx in members:
            plot(X_pca[idx, 0], X_pca[idx, 1], 'b.', color=colour)
    plt.axis('off')
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.title('PCA')
    if enable_debug_preview:
        plt.show()
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    image_base64_pca = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_pca

    
def data_save(f_dir, f_dir1, a):
    """Copy the files of f_dir into per-cluster subfolders of f_dir1.

    a is a list of clusters, each a list of indices into os.listdir(f_dir);
    the files of cluster c are copied into f_dir1//c (folders are created
    when missing).
    """
    file_name = os.listdir(f_dir)
    labels = np.zeros(len(file_name))
    for c, members in enumerate(a):
        for idx in members:
            labels[idx] = c
    if not os.path.exists(f_dir1):
        os.mkdir(f_dir1)
    for c in range(len(a)):
        cluster_dir = f_dir1 + '//' + str(c)
        if not os.path.exists(cluster_dir):
            os.mkdir(cluster_dir)
        for idx in np.where(labels == c)[-1]:
            shutil.copy(f_dir + '//' + file_name[idx], cluster_dir)

def data_biaoqian(parameters, a):
    """Build labelled records, grouped by cluster.

    For every path produces [path, cluster_id, first 300x3 block of the
    loaded array]; records for cluster 0 come first, then cluster 1, etc.
    """
    paths = parameters['paths']
    L = 300
    width = 3

    # labels[i] = cluster of path i.
    labels = np.zeros(len(paths))
    for c, members in enumerate(a):
        for idx in members:
            labels[idx] = c

    datas = []
    for c in range(len(a)):
        for idx in np.where(labels == c)[-1]:
            arr = np.load(paths[idx])[0:L, 0:width]
            datas.append([paths[idx], c, arr])
    return datas



def autoencoder(parameters):
    """Train a GRU autoencoder on trajectory files, derive a pairwise distance
    matrix from the bottleneck encodings, and cluster the trajectories.

    parameters keys used: "paths", "k", "train_epochs", "train_batch_size",
    "saved_weights_path", "saved_model_path", "disjz_path".
    When k == 0 a hand-rolled DBSCAN on the distance matrix is used;
    otherwise k-medoids (julei) with k taken from find_optimal_k.

    Returns (a, disjz): a is a list of clusters (lists of sample indices),
    disjz the dense pairwise distance matrix between encoded trajectories.
    """
    length=300
    cc=3
    paths = parameters["paths"]

    X_test=[]

    # Load each trajectory, truncate to length x cc, zero-pad short ones.
    for i in paths:
        df=np.load(str(i))[0:length,0:cc]
        if len(df)<length:
            df=np.array(list(df)+[[0 for i in range(cc)]]*(length-len(df)))
        X_test.append(np.array(df[0:length]))
    X_test=np.array(X_test)
    print(X_test)
    k = parameters["k"]

    
    def scheduler(epoch):
        # Every 100 epochs, scale the learning rate by 0.2.
        # (The name `autoencoder` below refers to the Model assigned further
        # down, shadowing this enclosing function's own name.)
        if epoch % 100 == 0 and epoch != 0:
            lr = K.get_value(autoencoder.optimizer.lr)
            K.set_value(autoencoder.optimizer.lr, lr * 0.2)
            print("lr changed to {}".format(lr * 0.2))
        return K.get_value(autoencoder.optimizer.lr)
    reduce_lr = LearningRateScheduler(scheduler)

    # GRU encoder (40 -> 20 units) and mirrored decoder; layer 'd1' is the
    # 20-dim bottleneck encoding used downstream for the distance matrix.
    n_in=len(X_test[1])
    input_size = (n_in,cc)
    lrr=0.001
    shu=keras.Input(shape=input_size)
    x = GRU(40,return_sequences=True,name='c1', recurrent_activation='sigmoid')(shu)
    x = GRU(20,name='d1', recurrent_activation='sigmoid')(x)
    encoded = RepeatVector(n_in)(x)
    x= GRU(20,return_sequences=True, recurrent_activation='sigmoid')(encoded)#()
    x = GRU(40,return_sequences=True, recurrent_activation='sigmoid')(x)
    decoded=TimeDistributed(Dense(cc))(x)
    autoencoder = Model(inputs=shu, outputs=decoded)
    autoencoder.compile(Adam(lr=lrr),loss='mse')#,accuracy=
    autoencoder.summary()
    train_epochs=parameters["train_epochs"]
    train_batch_size=parameters["train_batch_size"]
    # Train the autoencoder to reconstruct its own input.
    autoencoder.fit(X_test,X_test,
                    epochs=train_epochs,
                    batch_size=train_batch_size,
                    callbacks=[reduce_lr, CheckCancel()]#callbacks=[reduce_lr]#, early_stopping,tensorboard
                    #shuffle=True
                   )
    autoencoder.save_weights('zibianmaqi_weights.h5')
    model=Model(inputs=autoencoder.input,outputs=autoencoder.get_layer('d1').output)
    predict_results=model.predict(X_test,batch_size=64)
        # Save the model weights.
    saved_weights_path=parameters["saved_weights_path"]
    autoencoder.save_weights(saved_weights_path)

    # Save the encoder model.
    saved_model_path=parameters["saved_model_path"]
    model = Model(inputs=autoencoder.input, outputs=autoencoder.get_layer('d1').output)
    model.save(saved_model_path)
    # Distance matrix between encoded trajectories.
    def getDistance(x,y):
        # Squared Euclidean distance between two encoding vectors.
        distance=np.sum((x-y)**2)
        return distance

    disjz=-np.ones([len(X_test),len(X_test)])
    # Fill the symmetric matrix; -1 marks entries not yet computed.
    for i in range(len(X_test)): 
        for j in range(i,len(X_test)):
            df1=predict_results[i]
            df2=predict_results[j]
            if disjz[i,j]==-1:
                disjz[i,j]=getDistance(df1,df2)
                disjz[j,i]=disjz[i,j]
    disjz_path=parameters["disjz_path"]
    # Save the distance matrix as a .txt file.
    np.savetxt(disjz_path, disjz, fmt='%.6f', delimiter='\t')
    k_range = range(3,13)
    best_k = find_optimal_k(disjz, k_range,parameters)
    print("Optimal K value:", best_k)
    if k==0:  
        #DBSCAN
        # Neighbourhood radius: 1% of the largest observed pairwise distance.
        R=disjz.max()*0.01       # neighbourhood radius
        mintr=4              # a point with more than mintr neighbours within R counts as a core point
        neibor=[]
        # Precompute each point's list of neighbours within radius R.
        for i in range(len(disjz)):
            linshi=[]
            for j in range(len(disjz)):
                if disjz[i,j]<R:
                    linshi.append(j)
            neibor.append(linshi)
        vistied=[]
        for i in range(len(disjz)):
            vistied.append(0)
        a=[]
        for i in range(len(disjz)):
            if vistied[i]!=1 and len(neibor[i])>mintr:   # pick an unvisited core point
                cu=[]
                for j in neibor[i]:
                    if vistied[j]!=1:
                        cu.append(j)
                        vistied[j]=1
                while True:
                    # NOTE(review): cu2 = cu aliases the SAME list object, so
                    # `cu2 == cu` below is always True and this loop always
                    # exits after one expansion pass; `cu2 = cu.copy()` was
                    # probably intended — confirm before changing.
                    cu2=cu
                    for ii in cu:
                        for jj in neibor[ii]:
                            if vistied[jj]!=1:
                                cu.append(jj)
                                vistied[jj]=1
                    if cu2==cu:
                        break
                a.append(cu)
    else:
        
        k=best_k
        x=[i for i in range(len(disjz))]              # each trajectory represented by its index 0..n-1
        y=np.zeros(len(disjz))
        [a,kc]=julei(k,x,y,disjz)  
    return a,disjz
def extract_label(path):
    """Extract the flight label from a path of the form
    .../Variflight_<LABEL>_<YYYYMMDD>.xls<N>.npy.

    Raises ValueError when the filename does not match that pattern.
    """
    match = re.search(r'Variflight_(\w+)_\d{8}\.xls\d+\.npy$', path)
    if match is None:
        raise ValueError(f"无法从路径 {path} 中提取标签")
    return match.group(1)

def load_data(parameters):
    """Load every path in parameters["paths"] as a [path, label, data] record.

    Paths whose label cannot be extracted, or whose array is not 2-D with
    exactly 3 columns, are skipped with a printed message.
    """
    result = []
    for path in parameters["paths"]:
        print(f"Processing path: {path}")
        try:
            label = extract_label(path)
            data = np.load(path)
            print(f"Data shape: {data.shape}")
            if data.ndim != 2 or data.shape[1] != 3:
                raise ValueError(f"Data shape {data.shape} is not valid. Expected a 2D array with 3 features per point.")
            result.append([path, label, data])
        except ValueError as err:
            print(err)
            continue
    return result
# Clustering purity metric.
def purity_score(y_true, y_pred):
    """Clustering purity: fraction of samples whose cluster's majority true
    label matches their own true label. Returns a float in [0, 1].

    Fix: the original did `scipy.stats.mode(...)[1]`, whose meaning (count
    array vs scalar) changed across scipy versions and which summed an array
    rather than a number; count majorities directly with numpy instead.
    """
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    total = 0
    for cluster in np.unique(y_pred):
        members = y_true[y_pred == cluster]
        # Size of the largest true-label group inside this cluster.
        _, counts = np.unique(members, return_counts=True)
        total += counts.max()
    return total / len(y_true)
# Silhouette-coefficient metric.
def calculate_silhouette_score(distance_matrix, cluster_labels):
    """Mean silhouette coefficient over all samples, computed from a
    precomputed pairwise distance matrix."""
    return silhouette_score(distance_matrix, cluster_labels, metric='precomputed')

# Adjusted Rand Index metric.
def calculate_ari(y_true, y_pred):
    """Adjusted Rand Index between the true and predicted labellings."""
    return adjusted_rand_score(y_true, y_pred)

# Bar chart of the clustering quality metrics.
def zhibiaotu(origin_datas, julei_datas, disjz, parameters):
    """Compute clustering quality metrics (silhouette, purity, ARI), draw
    them as a bar chart, and return the chart as a base64-encoded PNG.

    origin_datas / julei_datas are [path, label, data] records; disjz is the
    pairwise matrix passed to the silhouette computation.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)

    # Encode the ground-truth string labels as integers 0..n-1.
    origin_labels = [record[1] for record in origin_datas]
    le = LabelEncoder()
    origin_labels_encoded = le.fit_transform(origin_labels)

    cluster_labels = [record[1] for record in julei_datas]

    assert len(disjz) == len(cluster_labels), "Distance matrix and labels must have the same number of samples"

    silhouette_avg = calculate_silhouette_score(disjz, cluster_labels)
    purity = purity_score(origin_labels_encoded, cluster_labels)
    ari = calculate_ari(origin_labels_encoded, cluster_labels)

    print(f"Silhouette Score: {silhouette_avg}")
    print(f"Purity: {purity}")
    print(f"Adjusted Rand Index (ARI): {ari}")

    labels = ['Purity', 'Silhouette', 'ARI']
    values = [purity, silhouette_avg, ari]
    plt.bar(labels, values, color=['blue', 'green', 'red'])
    plt.ylabel('Scores')
    plt.title('Clustering Performance Metrics')
    if enable_debug_preview:
        plt.show()

    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    image_base64_index = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return image_base64_index

def CancelTrain():
    """Request cancellation of a running training loop.

    Fix: without the `global` declaration the original assigned a local
    variable that was discarded on return, so the module-level flag polled
    by CheckCancel was never set and cancellation silently did nothing.
    """
    global __CANCEL_WAITHANDLE__
    __CANCEL_WAITHANDLE__ = True
# Custom Keras callback that aborts training when the cancel flag is set.
class CheckCancel(Callback):
    def on_batch_end(self, batch, logs=None):
        """After each batch, stop training if cancellation was requested."""
        global __CANCEL_WAITHANDLE__
        if __CANCEL_WAITHANDLE__:
            self.model.stop_training = True
            print("Training stopped due to __CANCEL_WAITHANDLE__ being True")
def reset_canceltrain():
    """Clear the cancellation flag before starting a new training run.

    Fix: the original assigned a function-local name without `global`, so the
    module-level flag stayed unchanged and a cancelled run could never be
    restarted cleanly.
    """
    global __CANCEL_WAITHANDLE__
    __CANCEL_WAITHANDLE__ = False

# Module-level cancellation flag: set by CancelTrain, polled by CheckCancel.
__CANCEL_WAITHANDLE__=False



def main1(parameters):
    """Feature 1: load a single trajectory .npy file, scatter-plot it, and
    return the plot (base64 PNG) together with the raw trajectory array.

    parameters: dict with "data_path" (one .npy file) and the optional
    "__ENABLE_DEBUG_PREVIEW__" flag.
    """
    enable_debug_preview = parameters.get("__ENABLE_DEBUG_PREVIEW__", False)

    # Configure Matplotlib fonts so the Chinese labels render correctly.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False

    df = np.load(parameters["data_path"])
    # Column 1 is treated as latitude, column 2 as longitude.
    latitudes = df[:, 1]
    longitudes = df[:, 2]

    print("Latitude range:", latitudes.min(), latitudes.max())
    print("Longitude range:", longitudes.min(), longitudes.max())

    plt.figure(figsize=(8, 6))
    plt.scatter(latitudes, longitudes, c='r', marker='o', s=50, label='航迹点')
    plt.xticks([])
    plt.yticks([])
    plt.axis('off')
    plt.title('单条航迹')
    plt.xlabel('Latitude')
    plt.ylabel('Longitude')
    plt.legend()
    plt.grid(True)
    if enable_debug_preview:
        plt.show()

    # Encode the figure as a base64 PNG for the caller.
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    buf.seek(0)
    image_base64_traj = base64.b64encode(buf.getvalue()).decode('utf-8')
    plt.clf()
    plt.close()

    return {
        "base64ImgStr": image_base64_traj,
        "traj_data": df,
    }
def main2(parameters):
    """Feature 2: full pipeline — train the autoencoder, cluster the
    trajectories, and render the cluster / PCA / metrics figures.

    Returns a dict of three base64 PNG strings plus the labelled trajectory
    records ([path, cluster, points] per trajectory).
    """
    reset_canceltrain()
    clusters, dist_matrix = autoencoder(parameters)
    cluster_img = data_plot(parameters, clusters, 300, 3)
    pca_img = pca_plot(parameters, dist_matrix, clusters)
    labelled = data_biaoqian(parameters, clusters)
    origin_datas = load_data(parameters)
    metrics_img = zhibiaotu(origin_datas, labelled, dist_matrix, parameters)
    return {
        "base64Imgcluster": cluster_img,   # multi-trajectory cluster plot
        "base64ImgPca": pca_img,           # PCA scatter of the trajectories
        "cluster_traj_data": labelled,     # [path, cluster, points] records
        "base64ImgIdx": metrics_img,       # performance-metric bar chart
    }


            
if __name__=="__main__":
    # Collect up to 449 preprocessed trajectory files from the input folder.
    data_dir = r"C:\Users\22377\Desktop\聚类\预处理"
    paths = []
    for count, fname in enumerate(os.listdir(data_dir), start=1):
        if count == 450:
            break
        paths.append(os.path.join(data_dir, fname))

    # Feature 1: single-trajectory preview.
    parameters_1 = {
        # One .npy trajectory file to plot.
        "data_path": r'C:\Users\22377\Desktop\聚类\预处理\Variflight_CCA1919_20210316.xls1.npy',
        # Allow debug output (interactive figures); internal use only.
        "__ENABLE_DEBUG_PREVIEW__": True,
    }

    # Feature 2: clustering-pipeline configuration.
    parameters_2 = {
        "__ENABLE_DEBUG_PREVIEW__": True,
        "paths": paths,                      # absolute paths of all trajectories
        "k": 9,                              # k == 0 selects DBSCAN, otherwise k-medoids
        "saved_weights_path": r'C:\Users\22377\Desktop\banjianduzibianmaqi_weights.h5',
        "saved_model_path": r'C:\Users\22377\Desktop\banjianduzibianmaqi_model.h5',
        "train_epochs": 100,
        "train_batch_size": 32,
        "disjz_path": r'C:\Users\22377\Desktop\disjz.txt',
    }

    # result_1: {"base64ImgStr": ..., "traj_data": ...}
    result_1 = main1(parameters_1)

    # result_2: cluster/PCA/metric images plus the labelled trajectory records.
    result_2 = main2(parameters_2)