import os
import numpy as np
import logging
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from keras.models import Model, load_model
import os,shutil
from warnings import simplefilter
simplefilter(action='ignore',category=FutureWarning)
import numpy as np
import math
from keras.callbacks import ReduceLROnPlateau, LambdaCallback
import matplotlib.pyplot as plt
from matplotlib.pyplot import plot#,savefig
import time
import keras
from keras.models import Model#,Sequential
from keras.optimizers import Adam
from keras.layers import Dense,Dropout,BatchNormalization,GRU,Lambda
from sklearn import preprocessing
import random as rd
import tensorflow as tf
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, adjusted_rand_score
import numpy as np
from collections import defaultdict
import logging
from collections import Counter
from matplotlib.lines import Line2D
from keras.callbacks import Callback, LearningRateScheduler
# Cap GPU 0's usable memory by creating a single 4 GB virtual device, so this
# process does not grab all GPU memory. Must run before any GPU is initialized.
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
  try:
    tf.config.experimental.set_virtual_device_configuration(
        gpus[0],
        [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*4)])  # limit GPU memory use to 4 GB
    logical_gpus = tf.config.experimental.list_logical_devices('GPU')
    print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
  except RuntimeError as e:
    # Virtual device config fails if GPUs were already initialized; report and continue on defaults.
    print(e)
tf.keras.backend.clear_session()  # drop any stale Keras graph/session state
import keras.backend as K
from keras.layers import Input
from keras.layers import RepeatVector
from keras.layers import TimeDistributed
from numpy.linalg import eig
import io
import base64
import re
from sklearn.preprocessing import LabelEncoder
class lstm_model:
    """Two-layer GRU encoder mapping a (length, cc) sequence to a 20-dim code."""

    def __init__(self, length, cc):
        # Layer names 'c1'/'d1' must not change: weights are later loaded by name.
        self.block1 = GRU(40, return_sequences=True, input_shape=[length, cc],
                          name='c1', recurrent_activation='sigmoid')
        self.block2 = GRU(20, name='d1', recurrent_activation='sigmoid')

    def call(self, inputs):
        """Run the encoder stack; returns the final 20-dim state per sample."""
        hidden = self.block1(inputs)
        return self.block2(hidden)
class delstm_model:
    """GRU decoder reconstructing a (length, cc) sequence from a 20-dim code."""

    def __init__(self, length, cc):
        # Layer names 'e1'/'f1'/'g1' must not change: weights are loaded by name.
        self.block0 = RepeatVector(length)
        self.block1 = GRU(20, return_sequences=True, name='e1',
                          recurrent_activation='sigmoid')
        self.block2 = GRU(40, return_sequences=True, name='f1',
                          recurrent_activation='sigmoid')
        self.block3 = TimeDistributed(Dense(cc, name='g1'))

    def call(self, inputs):
        """Expand the code over time, then decode back to cc channels per step."""
        seq = self.block0(inputs)
        seq = self.block1(seq)
        seq = self.block2(seq)
        return self.block3(seq)
#-------------------------#
#   Build the siamese network
#-------------------------#
def siamese(input_shape, length, cc):
    """Siamese GRU autoencoder with a learned pairwise distance head.

    Both inputs pass through the SAME encoder/decoder objects (shared weights).
    Returns a Model mapping [input_1, input_2] to
    [reconstruction_1, reconstruction_2, mean |code_1 - code_2| distance].
    """
    encoder = lstm_model(length, cc)
    decoder = delstm_model(length, cc)

    left = Input(shape=input_shape)
    right = Input(shape=input_shape)

    code_left = encoder.call(left)
    code_right = encoder.call(right)

    recon_left = decoder.call(code_left)
    recon_right = decoder.call(code_right)

    # Element-wise |difference| of the two codes, regularized, then averaged
    # over the code dimension into a single scalar per pair.
    diff = Lambda(lambda tensors: K.abs(tensors[0] - tensors[1]))([code_left, code_right])
    diff = BatchNormalization()(diff)
    diff = Dropout(0.1)(diff)
    distance = Lambda(lambda tensors: tf.reduce_mean(tensors, axis=1, keepdims=True))(diff)

    return Model([left, right], [recon_left, recon_right, distance])
def xunlian2(parameters,result_1):
    """Compute the symmetric pairwise distance matrix for the selected traces.

    Builds the siamese model, loads pretrained weights by layer name, runs every
    unordered pair (including self-pairs) through the network, and fills an
    (m, m) matrix from the scalar distance output.

    parameters: dict with "load_weights_path" and "disjz_path" keys.
    result_1:   dict with "selected_datas", a list of [file_path, label, data].
    Returns an (m, m) numpy array of pairwise distances.
    """
    length = 300
    cc = 3  # channels kept per trace
    input_shape = [length, cc]
    model = siamese(input_shape, length, cc)
    load_weights_path = parameters["load_weights_path"]
    logging.info('导入模型权重...')
    model.load_weights(load_weights_path, by_name=True)

    selected_datas = result_1["selected_datas"]
    lines = [item[0] for item in selected_datas]
    m = len(lines)
    min_max = preprocessing.MinMaxScaler()

    # Load and normalize each trace ONCE. The original reloaded and re-scaled
    # every file for every pair (O(m^2) file loads); fit_transform is
    # deterministic per trace, so the resulting pairs are identical.
    traces = []
    for path in lines:
        df = np.load(path)[0:length, 0:cc]
        df = min_max.fit_transform(df)  # per-trace min-max scaling
        if len(df) < length:
            # Zero-pad short traces up to `length` rows.
            df = np.vstack([df, np.zeros((length - len(df), cc))])
        traces.append(df)

    # Number of unordered pairs including self-pairs (was counted via a
    # redundant O(m^2) loop).
    num = m * (m + 1) // 2
    pairs_of_images = [np.zeros((num, length, cc)) for _ in range(2)]
    n = -1
    for i in range(m):
        for j in range(i + 1):
            n += 1
            pairs_of_images[0][n, :, :] = traces[i]
            pairs_of_images[1][n, :, :] = traces[j]

    model.summary()
    predict_results = model.predict(pairs_of_images, batch_size=1024)
    print(len(predict_results[2]))
    print(n)

    # Distances were produced in (i, j<=i) order; mirror into a full matrix.
    disjz = -np.ones([m, m])
    n = -1
    for i in range(m):
        for j in range(i + 1):
            n += 1
            disjz[i, j] = predict_results[2][n]
            disjz[j, i] = disjz[i, j]
    print(len(disjz))
    print(n)

    disjz_path = parameters["disjz_path"]  # NOTE(review): read but never used — saving apparently not implemented

    return disjz

def load_data_by_num(parameters):
    """Load up to `count` .npy traces per label from the local '内部数据' dir.

    parameters: {"length": int, <label>: count, ...}. The keys "length",
    "__ENABLE_DEBUG_PREVIEW__" and "q" are control keys; every other key is
    treated as a label sub-directory name with a requested sample count.
    Each loaded trace is truncated/zero-padded to exactly (length, 3).

    Returns {"selected_datas": [[file_path, label, (length, 3) array], ...]}.
    Raises ValueError when a requested count exceeds 50 or a file is not (N, 3).
    """
    logging.info('导入内部数据...')
    current_dir = os.path.dirname(os.path.abspath(__file__))
    data_dir = os.path.join(current_dir, '内部数据')

    selected_datas = []
    label_counts = defaultdict(int)
    length = parameters["length"]

    for label, count in parameters.items():
        if label in ["length", "__ENABLE_DEBUG_PREVIEW__", "q"]:
            continue
        # Enforce the per-label cap on the REQUESTED count. The original only
        # checked after truncating to the number of available files, so an
        # over-limit request slipped through whenever fewer files existed.
        if count > 50:
            raise ValueError(f"Exceeded maximum count for label {label}: {count}")

        label_dir = os.path.join(data_dir, label)
        if not os.path.exists(label_dir):
            logging.warning(f"Label directory {label_dir} does not exist")
            continue

        files = os.listdir(label_dir)
        np.random.shuffle(files)  # shuffle so the subset is a random sample
        count = min(count, len(files))

        for file in files[:count]:
            file_path = os.path.join(label_dir, file)
            data = np.load(file_path)  # data assumed stored as .npy arrays
            # Guard ndim too: a 1-D array would otherwise raise IndexError
            # instead of the intended ValueError.
            if data.ndim != 2 or data.shape[1] != 3:
                raise ValueError(f"Data shape {data.shape} is not valid. Expected (N, 3).")
            data = data[:length, :]  # keep only the first `length` points
            if len(data) < length:
                # Zero-pad short traces to exactly `length` rows.
                data = np.array(list(data) + [[0 for i in range(3)]] * (length - len(data)))
            selected_datas.append([file_path, label, data])
            label_counts[label] += 1
    logging.info('导入内部数据成功')

    return {

        "selected_datas": selected_datas,

    }

# Data-selection parameters: control keys plus per-label sample counts.
parameters_1= {
        "length":300,     # int, trace length, at most 400
        "__ENABLE_DEBUG_PREVIEW__":True,                            # allow internal debug output (charts, UI, …) — internal use only
        # class labels: requested sample count per label directory
        "CCA1919" : 40,          # at most 50
        "CES2360": 40,            # at most 50
        "CES2551": 40,            # at most 50
        "CHH7303": 40,            # at most 50
        "CHH7426": 40,       # at most 50
        "CHH7695": 40,        # at most 50
        "CSN3371": 40,           # at most 50
        "CSN6568": 40,           # at most 50
        "CSN6761": 40,        # at most 50
    }

    
# Model/training parameters for the siamese distance step.
parameters_2 = {

        "__ENABLE_DEBUG_PREVIEW__":True, 
        "labeled_rate":0.1,                                                                   # labeled rate, 0 < n <= 1
        "load_weights_path":r'C:\Users\22377\Desktop\banjianduzibianmaqi_weights.h5',        # weight file path
        "load_model_path":r'C:\Users\22377\Desktop\banjianduzibianmaqi_model.h5',         # model file path

        "train_epochs":80,                                                                # number of training epochs
        "train_batch_size":32,                                                             # training batch size
        "disjz_path":r'C:\Users\22377\Desktop\bjd_disjz.txt',                                  # distance-matrix save path
        "out_path":r'C:\Users\22377\Desktop\半监督聚类结果'                                 # clustering-result save path
    }
    
    

result_1=load_data_by_num(parameters_1)# internal data: {"selected_datas": [[file_path, label, array], ...]}


result_2=xunlian2(parameters_2,result_1)  # (m, m) pairwise distance matrix