import os
import time

import numpy as np
import scipy
import scipy.signal
import tensorflow as tf
from scipy.io import loadmat
from sklearn.model_selection import train_test_split
from tensorflow.keras.utils import to_categorical
def ReadAndShuffle(path, data_len=1024):
    """Load `<data_len>_data.mat` from every sub-folder of `path` and return
    shuffled train/valid splits.

    Each sub-folder is renamed to ``dataset_00i`` (keeps the naming consistent
    across runs — NOTE(review): renaming is destructive, confirm that callers
    expect it), and the folder's position `i` (0-based) becomes the class
    label, one-hot encoded over 10 classes.

    Parameters
    ----------
    path : str
        Directory containing one sub-folder per class.
    data_len : int, default 1024
        Selects the .mat file name: 512 / 1024 -> that length, anything else
        -> 4096.

    Returns
    -------
    (x_train, x_valid, y_train, y_valid) with x arrays shaped
    (samples, data_len, 1) and y arrays one-hot (samples, 10).

    Raises
    ------
    ValueError
        If `path` contains no sub-folder (the previous revision crashed with
        an unbound-variable NameError in that case).
    """
    if data_len == 512:
        name_add = '512_data.mat'
    elif data_len == 1024:
        name_add = '1024_data.mat'
    else:
        name_add = '4096_data.mat'
    x_chunks = []
    y_chunks = []
    i = 0
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        if not os.path.isdir(full_path):
            continue
        new_path = os.path.join(path, f"dataset_{i + 1:03d}")
        os.rename(full_path, new_path)
        data_path = os.path.join(new_path, name_add)
        data = loadmat(data_path)['row_data'].astype(np.float32)
        x_chunks.append(data)
        # One integer label per row; folder index i is the class id.
        y_chunks.append(np.repeat(i, data.shape[0], axis=0))
        print(f"已导入 {data_path}")
        i = i + 1
    if not x_chunks:
        raise ValueError(f"No dataset sub-folders found under {path}")
    # Stack once at the end instead of re-allocating with vstack per folder.
    x_raw = np.vstack(x_chunks)
    y_raw = np.concatenate(y_chunks, axis=0)

    # Shuffle x and y with the same permutation so pairs stay aligned.
    indices = np.arange(x_raw.shape[0])
    np.random.shuffle(indices)
    x_raw = x_raw[indices]
    y_raw = y_raw[indices]
    y_raw = to_categorical(y_raw, num_classes=10)
    x_train, x_valid, y_train, y_valid = train_test_split(x_raw, y_raw,
                                                          test_size=0.20,
                                                          random_state=42)
    # Trailing channel axis for Conv1D-style model inputs.
    x_train = np.expand_dims(x_train, axis=-1)
    x_valid = np.expand_dims(x_valid, axis=-1)

    return x_train, x_valid, y_train, y_valid

def PrintModel(model, input_shape=(1024, 1)):
    """Print the model summary, parameter counts, and average inference time.

    Parameters
    ----------
    model : Keras-style model
        Must expose ``summary()``, ``trainable_variables``,
        ``non_trainable_variables`` and be callable as
        ``model(x, training=False)``.
    input_shape : tuple of int, default (1024, 1)
        Per-sample input shape used to build the random benchmark tensor
        (previously hard-coded to (1024, 1); the default keeps the old
        behavior for existing callers).
    """
    model.summary()
    # Parameter counts from the variables the model actually holds.
    trainable_params = np.sum([np.prod(v.shape) for v in model.trainable_variables])
    non_trainable_params = np.sum([np.prod(v.shape) for v in model.non_trainable_variables])
    total_params = trainable_params + non_trainable_params

    print(f"Trainable Parameters: {trainable_params}")
    print(f"Non-Trainable Parameters: {non_trainable_params}")
    print(f"Total Parameters: {total_params}")

    # Benchmark inference with a random tensor shaped like one real sample.
    batch_size = 1
    input_data = np.random.random((batch_size, *input_shape)).astype(np.float32)
    num_trials = 100  # average over many runs to smooth out jitter
    # perf_counter is monotonic and high-resolution — better for timing
    # than time.time(), which can jump with wall-clock adjustments.
    start_time = time.perf_counter()
    for _ in range(num_trials):
        _ = model(input_data, training=False)  # inference mode only
    end_time = time.perf_counter()
    average_inference_time = (end_time - start_time) / num_trials

    print(f"Average Inference Time (per input): {average_inference_time * 1000:.4f} ms")
    return

# Map a file base-name to an integer class label: the prefix before the first
# underscore selects the class; the numeric suffix is ignored here.
def FileNameToLabel(filename):
    """Return the class label (cyt->1, zhh->2, pqx->3, unknown->0).

    Uses str.partition instead of unpacking str.split('_'), so names with
    zero or several underscores no longer raise ValueError.
    """
    mapping = {"cyt": 1, "zhh": 2, "pqx": 3}
    prefix, _, _num = filename.partition('_')
    return mapping.get(prefix, 0)
def ReadAndShuffleNpy(path, per_data, data_num=500, data_len=1024, Min_Peak=0.2, distance=64):
    """Build four shuffled train/valid dataset pairs from all .npy files in `path`.

    Each file is labelled via ``FileNameToLabel`` on its base-name. The raw
    stream (first 10000 samples discarded — presumably warm-up, TODO confirm)
    is cut into `data_num` rows of `per_data` samples, and every row becomes a
    `data_len`-bin histogram. Peaks of the normalized histogram (height >=
    `Min_Peak`, >= `distance` bins apart, bin index >= 20) are sliced out as
    2*`distance`-wide windows around each peak.

    Returns
    -------
    (x_train_list, x_valid_list, y_train_list, y_valid_list), each a nested
    list over four representations in this order: raw peak slices, normalized
    peak slices, raw histograms, normalized histograms. Labels are one-hot
    over 3 classes (raw labels are 1..3).

    Fixes vs. the previous revision:
    - removed the erroneous ``@tf.function`` decorator — this function does
      file I/O and NumPy work that TF graph tracing cannot express;
    - files with exactly `data_num` rows are now accepted (off-by-one);
    - the peak/signal consistency check compares row counts, not row widths
      (the old test compared slice width with histogram width, always firing);
    - raises a clear ValueError when no usable file is found (the old code
      crashed with an IndexError);
    - stacks all per-file arrays once instead of re-allocating per file.
    """
    piece_chunks      = []   # raw peak slices, one array per file
    norm_piece_chunks = []   # normalized peak slices
    orig_chunks       = []   # full raw histograms (no peak slicing)
    norm_orig_chunks  = []   # full normalized histograms
    label_chunks      = []   # integer label per histogram row
    for file_name in os.listdir(path):
        if not file_name.endswith('.npy'):
            continue
        base_name = os.path.splitext(file_name)[0]
        label     = FileNameToLabel(base_name)
        file_path = os.path.join(path, file_name)
        data_show = np.load(file_path)[10000:]   # drop leading samples
        usable    = (len(data_show) // per_data) * per_data
        data_show = np.reshape(data_show[:usable], [-1, per_data])
        if data_show.shape[0] < data_num:
            print("数据长度不足，请减少数据行数")
            break
        data_show = data_show[:data_num]
        x_out, x_norm_out = [], []
        x_origin_slice, x_norm_slice = [], []
        for i in range(data_num):
            hist, _   = np.histogram(data_show[i], bins=data_len)
            hist_norm = hist / max(hist)
            loc_peaks, _ = scipy.signal.find_peaks(hist_norm, height=Min_Peak,
                                                   distance=distance)
            loc_peaks = loc_peaks[loc_peaks >= 20]   # skip peaks in the lowest bins
            for loc in loc_peaks:
                loc   = int(loc)
                piece = hist[max(0, loc - distance):min(loc + distance, len(hist))]
                if len(piece) != 2 * distance:       # slice clipped at an edge
                    print(f"本文件第{i}个信号位置{loc}峰值切片长度不足，为{len(piece)}")
                    print(f"该信号具有{len(loc_peaks)}个峰值")
                x_origin_slice.append(piece)
                x_norm_slice.append(piece / max(piece))
            x_out.append(hist)       # one fixed-length histogram per signal row
            x_norm_out.append(hist_norm)
        piece_chunks.append(np.array(x_origin_slice).astype(np.float32))
        norm_piece_chunks.append(np.array(x_norm_slice).astype(np.float32))
        orig_chunks.append(np.array(x_out).astype(np.float32))
        norm_orig_chunks.append(np.array(x_norm_out).astype(np.float32))
        label_chunks.append(np.repeat(label, data_num, axis=0))
        print(f"已导入{file_name}")

    if not orig_chunks:
        raise ValueError(f"No usable .npy files were found under {path}")
    x_raw_piece     = np.vstack(piece_chunks)
    x_norm_piece    = np.vstack(norm_piece_chunks)
    x_raw_original  = np.vstack(orig_chunks)
    x_norm_original = np.vstack(norm_orig_chunks)
    y_raw           = np.concatenate(label_chunks, axis=0)

    # The shared shuffle/split below assumes one peak slice per signal; warn
    # when the row counts diverge, because the arrays then cannot line up.
    if x_raw_piece.shape[0] != x_raw_original.shape[0]:
        print("峰数大于信号数")
    indices = np.arange(x_raw_original.shape[0])
    np.random.shuffle(indices)
    x_raw_piece     = x_raw_piece[indices]
    x_norm_piece    = x_norm_piece[indices]
    x_raw_original  = x_raw_original[indices]
    x_norm_original = x_norm_original[indices]
    y_raw           = y_raw[indices]
    y_raw           = to_categorical(y_raw - 1, num_classes=3)  # labels 1..3 -> 0..2
    xrp_train, xrp_valid, yrp_train, yrp_valid = train_test_split(
        x_raw_piece, y_raw, test_size=0.20, random_state=42)
    xnp_train, xnp_valid, ynp_train, ynp_valid = train_test_split(
        x_norm_piece, y_raw, test_size=0.20, random_state=42)
    xro_train, xro_valid, yro_train, yro_valid = train_test_split(
        x_raw_original, y_raw, test_size=0.20, random_state=42)
    xno_train, xno_valid, yno_train, yno_valid = train_test_split(
        x_norm_original, y_raw, test_size=0.20, random_state=42)
    # Trailing channel axis for Conv1D-style model inputs.
    xrp_train = np.expand_dims(xrp_train, axis=-1)
    xnp_train = np.expand_dims(xnp_train, axis=-1)
    xro_train = np.expand_dims(xro_train, axis=-1)
    xno_train = np.expand_dims(xno_train, axis=-1)

    xrp_valid = np.expand_dims(xrp_valid, axis=-1)
    xnp_valid = np.expand_dims(xnp_valid, axis=-1)
    xro_valid = np.expand_dims(xro_valid, axis=-1)
    xno_valid = np.expand_dims(xno_valid, axis=-1)

    x_train_list = [[xrp_train], [xnp_train], [xro_train], [xno_train]]
    x_valid_list = [[xrp_valid], [xnp_valid], [xro_valid], [xno_valid]]
    y_train_list = [[yrp_train], [ynp_train], [yro_train], [yno_train]]
    y_valid_list = [[yrp_valid], [ynp_valid], [yro_valid], [yno_valid]]
    return x_train_list, x_valid_list, y_train_list, y_valid_list

def PrintModelYEDS(model, data):
    """Measure and print the model's mean single-sample inference latency.

    `data` is a 2-D (h, w) array; it is reshaped into a batch of one sample
    and pushed through `model` 100 times in inference mode, then the average
    wall-clock time per call is printed in milliseconds.
    """
    rows, cols = data.shape
    batch = data.reshape([1, rows, cols])   # batch size of one
    trials = 100                            # average over many runs
    started = time.time()
    for _ in range(trials):
        model(batch, training=False)        # inference only, no training mode
    mean_seconds = (time.time() - started) / trials

    print(f"Average Inference Time (per input): {mean_seconds * 1000:.4f} ms")
    return