import h5py
import numpy as np


# =================================================== Utility functions ==============================================
# Read a .h5 file
def read_h5(path_h5):
    """Read the 'inputs' and 'outputs' datasets from an HDF5 file.

    Parameters:
        path_h5: path to the .h5 file to open read-only.

    Returns:
        tuple: (inputs, outputs) fully loaded into memory as numpy arrays.
    """
    with h5py.File(path_h5, 'r') as h5_file:
        # [()] reads the whole dataset into memory at once.
        x = h5_file['inputs'][()]
        y = h5_file['outputs'][()]
        # Quick sanity check that the datasets were loaded.
        print("Inputs shape:", x.shape)
        print("Outputs shape:", y.shape)
        return x, y


# Save to a .h5 file
def save_h5(path_h5, X, Y):
    """Write X and Y to path_h5 as gzip-compressed 'inputs'/'outputs' datasets.

    Overwrites any existing file at path_h5. X and Y are numpy arrays,
    e.g. X shaped (60000, 500, 500) and Y shaped (60000, 8, 8).
    """
    with h5py.File(path_h5, 'w') as h5_file:
        for name, array in (('inputs', X), ('outputs', Y)):
            h5_file.create_dataset(name, data=array, compression="gzip", compression_opts=9)


# Load datasets from an h5 file
def load_h5_data(path_h5, keys=None):
    """Load datasets from an HDF5 file into a dict of numpy arrays.

    Parameters:
        path_h5: path of the .h5 file to read.
        keys: iterable of dataset names to load; None loads every dataset.

    Returns:
        dict mapping dataset name -> numpy array. Requested keys that do
        not exist in the file are reported and skipped.
    """
    print(f"[CNN dataset gen] 从文件{path_h5}加载数据")
    loaded = {}
    with h5py.File(path_h5, 'r') as h5_file:
        wanted = h5_file.keys() if keys is None else keys
        for name in wanted:
            if name not in h5_file:
                print(f"[CNN dataset gen] 数据 {name} 不存在")
                continue
            print(f"[CNN dataset gen] 加载数据 {name}")
            loaded[name] = h5_file[name][:]
    return loaded


# Generator that streams batches from h5 files
def data_generator(path_h5_files, batch_size=8, keys=('phase', 'pattern')):
    """Infinite generator yielding (X, Y) batches from a list of HDF5 files.

    Parameters:
        path_h5_files: iterable of .h5 file paths, cycled through forever.
        batch_size: samples per yielded batch; a trailing remainder smaller
            than batch_size is dropped for each file.
        keys: two dataset names — keys[1] supplies X_batch (inputs) and
            keys[0] supplies Y_batch (labels). The default is a tuple, not a
            list, to avoid the mutable-default-argument pitfall; callers may
            still pass a list (only indexing is used).

    Yields:
        (X_batch, Y_batch) numpy arrays; X gains a trailing channel axis
        when it is 3-D so it fits CNN-style (H, W, C) inputs.
    """
    while True:
        for path_h5 in path_h5_files:
            with h5py.File(path_h5, 'r') as hf:
                num_samples = hf[keys[0]].shape[0]
                steps_per_epoch = num_samples // batch_size
                for i in range(steps_per_epoch):
                    start = i * batch_size
                    end = (i + 1) * batch_size
                    # Report which file and sample range is being read.
                    print(f"[Data Generator] Reading from file: {path_h5}, samples: {start} to {end - 1}")
                    X_batch = hf[keys[1]][start:end]
                    Y_batch = hf[keys[0]][start:end]
                    # Add a channel dimension for CNN input when absent.
                    if X_batch.ndim == 3:
                        X_batch = X_batch[..., np.newaxis]
                    yield X_batch, Y_batch


# Save datasets to h5 in appendable chunks
def save_h5_in_chunks(path_h5, dict_data):
    """Append-or-create datasets in path_h5 from a dict of numpy arrays.

    Parameters:
        path_h5: target .h5 file, opened in append mode.
        dict_data: mapping of dataset name -> numpy array,
            e.g. {"X": X, "Y": Y, "Z": Z} with X shaped (60000, 500, 500).

    Existing datasets are grown along axis 0 and the new rows written at the
    end; new datasets are created resizable (maxshape None on axis 0) with
    gzip compression so later calls can keep appending.
    """
    print(f"[CNN dataset gen] 保存数据到文件{path_h5}")
    with h5py.File(path_h5, 'a') as h5_file:
        for name, array in dict_data.items():
            print(f"[CNN dataset gen] 准备保存数据 {name}")
            if name not in h5_file:
                print(f"[CNN dataset gen] 保存数据--新建 {name} 到文件 {path_h5}")
                h5_file.create_dataset(
                    name,
                    data=array,
                    maxshape=(None, *array.shape[1:]),
                    chunks=True,
                    compression="gzip",
                    compression_opts=9,
                )
            else:
                print(f"[CNN dataset gen] 保存数据--添加 {name} 到文件 {path_h5}")
                dset = h5_file[name]
                old_rows = dset.shape[0]
                # Grow along axis 0, then write the new rows into the tail.
                dset.resize((old_rows + array.shape[0], *array.shape[1:]))
                dset[old_rows:] = array




# ==================================================== Test helpers ===================================================
def test_save_load_trunk():
    """Round-trip smoke test: append random arrays to disk, reload, print."""
    # Three random sample arrays keyed X/Y/Z.
    dict_data = {
        "X": np.random.rand(60, 500, 500),
        "Y": np.random.rand(60, 8, 8),
        "Z": np.random.rand(60, 10, 10),
    }
    # Save the sample data, then load it back.
    save_h5_in_chunks("../files/test_h5_save_trunk_load.h5", dict_data)
    loaded_data = load_h5_data("../files/test_h5_save_trunk_load.h5")
    # Spot-check the first couple of rows of each reloaded dataset.
    for key, data in loaded_data.items():
        print(f"Data for key: {key}")
        print(data[:2])  # first two elements
        print()  # blank line for readability




if __name__ == '__main__':
    # Optional: configure logging to console or file before running, e.g.
    #   setup_logging(log_file="../../files/logs/log_multi_beam_CNN.log")
    #   logger = logging.getLogger("[test][h5]")
    #   logger.info("test save trunk & load")
    test_save_load_trunk()