import h5py
import time
import psutil
import os
import numpy as np
import multiprocessing as mp


def create_test_datasets():
    """Generate the benchmark fixtures on disk.

    Writes one large HDF5 file ("./seed.hdf5") holding all 45 subject
    datasets, plus one small per-subject file under "./subjects/".  Every
    dataset has shape (62, 720000), chunk shape (62, 200), dtype float32.
    Finally reopens both files read-only and prints two dtypes as a
    sanity check.

    Side effects: creates the ./subjects/ directory if missing and writes
    roughly 8 GB of random data in total.
    """
    # Ensure the per-subject output directory exists before writing;
    # h5py.File does not create parent directories.
    os.makedirs("./subjects", exist_ok=True)

    # Context managers close each handle even if create_dataset raises
    # (the original leaked open handles on error).
    with h5py.File("./seed.hdf5", "a") as seed:
        for subject in range(45):
            seed.create_dataset(
                f"sub_{subject}",
                data=np.random.random((62, 720000)),
                chunks=(62, 200),
                dtype=np.float32,
            )
            # Mirror the same data into the subject's own small file.
            with h5py.File(f"./subjects/sub_{subject}.hdf5", "a") as subject_file:
                subject_file.create_dataset(
                    "dataset",  # plain string: the original f-string had no placeholder
                    data=seed[f"sub_{subject}"],
                    chunks=(62, 200),
                    dtype=np.float32,
                )

    # Sanity check: reopen read-only and confirm the stored dtypes.
    with h5py.File("./seed.hdf5", "r") as seed:
        print(seed["sub_0"].dtype)

    with h5py.File("./subjects/sub_0.hdf5", "r") as subject_file:
        print(subject_file["dataset"].dtype)


def train(read_func, sub_proc_num=5, epoch_num=2, batch_size=270):
    """Benchmark one read strategy and report wall time and RSS growth.

    Calls ``read_func(name_info, [lo, hi], sub_proc_num)`` 600 times per
    epoch with a sliding window of 1200 columns advancing by 200, printing
    the batch dimensions each step.  ``read_func`` stores its display name
    into ``name_info[0]``; the final summary line reports elapsed time and
    the change in resident memory since the start.

    NOTE(review): ``batch_size`` is only echoed in the summary — it does not
    shape the reads; the window width is hard-coded to 1200.
    """
    tic = time.time()
    proc = psutil.Process(os.getpid())
    mem_before = proc.memory_info().rss
    name_info = [None]

    for _ in range(epoch_num):
        for step in range(600):
            lo = step * 200
            batch = read_func(name_info, [lo, lo + 1200], sub_proc_num)
            # Echo the batch dimensions: subjects x channels x samples.
            print(len(batch))
            print(len(batch[0]))
            print(len(batch[0][0]))

    elapsed = time.time() - tic
    mem_delta = psutil.Process(os.getpid()).memory_info().rss - mem_before
    print(f"方法: {name_info[0]} | epoch: {epoch_num}, batch_size: {batch_size}, 用时: {elapsed:>.4f}s, 内存占用: {mem_delta/1024}KB")


# Large file, serial: every subject slice comes from one shared HDF5 file.
def read_seed(name_info, batch_idx, sub_proc_num):
    """Read one batch window for all 45 subjects from ./seed.hdf5.

    Args:
        name_info: single-element list; element 0 is set to this
            strategy's display name (read by the caller for reporting).
        batch_idx: two-element [start, stop] column window.
        sub_proc_num: unused here; kept so all read_* functions share
            the same signature.

    Returns:
        List of 45 arrays, each the (62, stop-start) slice for one subject.
    """
    name_info[0] = "大文件 串行"
    # Context manager releases the handle even if a read raises
    # (the original leaked the open file on error).
    with h5py.File("./seed.hdf5", "r") as seed:
        return [
            seed[f"sub_{subject}"][:, batch_idx[0]:batch_idx[1]]
            for subject in range(45)
        ]

# Small files, serial: one HDF5 file per subject.
def read_subs(name_info, batch_idx, sub_proc_num):
    """Read one batch window for all 45 subjects, one file per subject.

    Args:
        name_info: single-element list; element 0 is set to this
            strategy's display name (read by the caller for reporting).
        batch_idx: two-element [start, stop] column window.
        sub_proc_num: unused here; kept so all read_* functions share
            the same signature.

    Returns:
        List of 45 arrays, each the (62, stop-start) slice for one subject.
    """
    name_info[0] = "小文件 串行"
    results = []
    for subject in range(45):
        # "with" closes each per-subject handle even if the read raises
        # (the original leaked the handle on error).
        with h5py.File(f"./subjects/sub_{subject}.hdf5", "r") as subject_file:
            results.append(subject_file["dataset"][:, batch_idx[0]:batch_idx[1]])
    return results

# Large file, parallel — disabled. NOTE(review): as written,
# read_seed_parallel_function relies on a module-level `file` handle that is
# also commented out, so the pool tasks would fail with a NameError; verify
# a working handle-sharing strategy before re-enabling.
# def read_seed_parallel(name_info, batch_idx, sub_proc_num):
#     name_info[0] = "大文件 并行"
#     results = []
#     proc_pool = mp.Pool(sub_proc_num)
#     seeds = [h5py.File("./seed.hdf5", "r") for _ in range(sub_proc_num)]

#     results = [proc_pool.apply_async(read_seed_parallel_function, (i, batch_idx)).get() for i in range(45)]
#     [seed.close() for seed in seeds]
#     return results

# # file = h5py.File("./seed.hdf5", "r")
# # print(1)

# def read_seed_parallel_function(subject, batch_idx):
#     return file[f"sub_{subject}"][:, batch_idx[0]:batch_idx[1]]


if __name__ == "__main__":
    # One-time fixture generation; run once before benchmarking.
    # create_test_datasets()

    # Benchmark exactly one read strategy per run.
    # train(read_seed)
    train(read_subs)
    # train(read_seed_parallel)
   

    