import os
import numpy as np
import pickle
import re
from spikeProcessor import SpikeProcessor


def sort_by_number_in_filename(files):
    """Sort filenames by the first integer embedded in each name.

    Names without any digits sort first (their key is 0). Sorting is
    stable, so equal-key names keep their original relative order.
    """
    def _numeric_key(name):
        digits = re.search(r'\d+', name)
        return 0 if digits is None else int(digits.group())

    return sorted(files, key=_numeric_key)


# Load every data/label pair plus the info file from a session folder.
def all_data_loader(path):
    """Load all data, labels and metadata from ``path``.

    ``path`` is expected to contain:
      - ``data``:  a folder of pickle files (one per trial/session),
      - ``label``: a folder of ``.npy`` files, matched to the data files
        by the number embedded in their filenames,
      - ``info.pkl``: a single pickle with session metadata.

    Returns:
        (data_list, label_list, info_data) where the first two are lists
        ordered by the numeric part of the filenames.

    Raises:
        FileNotFoundError: if ``data``, ``label`` or ``info.pkl`` is
            missing from ``path`` (previously this crashed later with an
            opaque ``NameError``).

    NOTE: ``pickle.load`` is only safe on trusted files — do not point
    this at untrusted input.
    """
    entries = os.listdir(path)
    if 'data' not in entries:
        raise FileNotFoundError(f"'data' folder not found in {path}")
    if 'label' not in entries:
        raise FileNotFoundError(f"'label' folder not found in {path}")
    if 'info.pkl' not in entries:
        raise FileNotFoundError(f"'info.pkl' not found in {path}")
    data_path = os.path.join(path, 'data')
    print('find data_folder')
    label_path = os.path.join(path, 'label')
    print('find label_folder')
    info_file = os.path.join(path, 'info.pkl')
    print('find info_file')

    # Sort both folders by the embedded trial number so data[i] and
    # label[i] refer to the same trial.
    sorted_data_names = sort_by_number_in_filename(os.listdir(data_path))
    sorted_label_names = sort_by_number_in_filename(os.listdir(label_path))
    data_pkl_files = [os.path.join(data_path, f) for f in sorted_data_names]
    label_npy_files = [os.path.join(label_path, f) for f in sorted_label_names]

    data_list = []
    for data_pkl_file in data_pkl_files:
        with open(data_pkl_file, 'rb') as file:
            data_list.append(pickle.load(file))

    label_list = [np.load(f) for f in label_npy_files]

    with open(info_file, 'rb') as file:
        info_data = pickle.load(file)

    return data_list, label_list, info_data


# Bin and smooth spike data.
def data_bin_smooth(data, n_points, bin_size, fs):
    """Bin then smooth each spike array via ``SpikeProcessor``.

    Each ``data[i]`` is processed with its own point count
    ``n_points[i]``; the smoothed firing rate is transposed before being
    collected. Returns a list parallel to ``data``.
    """
    smoothed_all = []
    for i, spikes in enumerate(data):
        processor = SpikeProcessor(n_points[i], bin_size, fs)
        firing_rate = processor.binning(spikes)
        smoothed_all.append(processor.smoothing(firing_rate).T)
    return smoothed_all


# Bin raw electrophysiology signals (time axis averaged per bin).
def data_bin(data, bin_size):
    """Average each transposed (time, channel) array over fixed-size bins.

    Every array in ``data`` is first transposed, then its time axis is
    split into ceil(T / bin_size) bins and each bin is averaged.

    NOTE(review): a trailing partial bin is zero-padded before averaging,
    so its mean is diluted by the padding (divided by ``bin_size``, not
    the actual sample count) — confirm this is intentional.
    """
    binned_all = []
    for arr in data:
        arr_t = arr.T
        n_rows, n_feats = arr_t.shape
        n_bins = (n_rows + bin_size - 1) // bin_size  # ceil division
        # Zero-pad up to a whole number of bins, then average per bin.
        padded = np.zeros((n_bins * bin_size, n_feats))
        padded[:n_rows] = arr_t
        binned_all.append(padded.reshape(n_bins, bin_size, n_feats).mean(axis=1))
    return binned_all


# Bin behavioral label data (time axis averaged per bin).
def label_bin(label, bin_size):
    """Average each (time, feature) label array over fixed-size bins.

    Splits the time axis of every array in ``label`` into
    ceil(T / bin_size) bins and averages each bin.

    NOTE(review): a trailing partial bin is zero-padded before averaging,
    so its mean is diluted by the padding (divided by ``bin_size``, not
    the actual sample count) — confirm this is intentional.
    """
    binned_labels = []
    for arr in label:
        n_rows, n_feats = arr.shape
        n_bins = (n_rows + bin_size - 1) // bin_size  # ceil division
        # Zero-pad up to a whole number of bins, then average per bin.
        padded = np.zeros((n_bins * bin_size, n_feats))
        padded[:n_rows] = arr
        binned_labels.append(padded.reshape(n_bins, bin_size, n_feats).mean(axis=1))
    return binned_labels


# Overall sparsity across all arrays pooled together.
def sparse_ratio(lst):
    """Return the fraction of zero entries pooled across all arrays in ``lst``.

    Args:
        lst: iterable of numpy arrays.

    Returns:
        float in [0, 1]: 1 - (non-zero count / total element count).

    Raises:
        ValueError: if ``lst`` contains no elements at all (previously
            this surfaced as an opaque ``ZeroDivisionError``).
    """
    total_count = sum(elem.size for elem in lst)
    if total_count == 0:
        raise ValueError('sparse_ratio: input contains no elements')
    non_zero_count = sum(np.count_nonzero(elem) for elem in lst)
    return 1 - non_zero_count / total_count


# Per-array sparsity: mean and variance across arrays.
def calc_sparsity(arr_list):
    """Compute per-array sparsity, then its mean and variance.

    Each array's sparsity is the fraction of its entries equal to zero;
    arrays are weighted equally regardless of size.

    Args:
        arr_list: iterable of numpy arrays.

    Returns:
        (mean_sparsity, var_sparsity) as numpy floats.
    """
    # Leftover debug print of the raw list removed; callers only need
    # the aggregate statistics.
    sparsity_list = [np.count_nonzero(arr == 0) / arr.size for arr in arr_list]
    return np.mean(sparsity_list), np.var(sparsity_list)


if __name__ == '__main__':
    # Smoke check when this module is executed directly.
    print('ok')
