import h5py
from pathlib import Path

# Merge two HDF5 snapshot files (test + val) of one cross-validation fold
# into a single combined file, streaming the data chunk-wise.
DATASET_PATH = Path('/home/timodw/IDLab/Digihealth-Asia/cardiovascular_monitoring/ugent/heartbeat_classification/processed_data/training_snapshots/incart_stratified_standard_25hz_5sr/fold_0')

# Source files are opened read-only; the merged result is written fresh.
test_hdf5 = h5py.File(DATASET_PATH / 'test.hdf5', 'r')
val_hdf5 = h5py.File(DATASET_PATH / 'val.hdf5', 'r')
output_hdf5 = h5py.File(DATASET_PATH / 'test_val.hdf5', 'w')

# Rows copied per slab — tune to the available memory.
chunk_size = 1024

# Copy every dataset from test.hdf5 into the output file, chunk by chunk.
for key in test_hdf5.keys():
    # Hoist the HDF5 group lookups out of the chunk loop: `test_hdf5[key]`
    # and `output_hdf5[key]` each cost a name resolution per access.
    src = test_hdf5[key]
    n_rows = src.shape[0]

    # maxshape=(None, ...) makes axis 0 resizable so the val rows can be
    # appended to the same dataset afterwards.
    dst = output_hdf5.create_dataset(
        key, shape=src.shape, dtype=src.dtype, maxshape=(None,) + src.shape[1:]
    )

    # Stream in fixed-size slabs to bound peak memory (ceil division).
    num_chunks = (n_rows + chunk_size - 1) // chunk_size
    for i, start in enumerate(range(0, n_rows, chunk_size)):
        end = min(start + chunk_size, n_rows)
        dst[start:end] = src[start:end]
        print(f"Copied test chunk {i+1}/{num_chunks} for {key}. Chunk size: {end - start}")

# Append every dataset from val.hdf5 after the test rows, chunk by chunk.
# NOTE(review): assumes every val key was already created while copying
# test.hdf5 — a key present only in val.hdf5 would raise KeyError here.
for key in val_hdf5.keys():
    src = val_hdf5[key]
    n_rows = src.shape[0]
    dst = output_hdf5[key]

    # Record where the appended region starts BEFORE growing the dataset:
    # this replaces the fragile per-chunk recomputation
    # `output_hdf5[key].shape[0] - val_shape[0]` — same value, computed once.
    offset = dst.shape[0]
    dst.resize(offset + n_rows, axis=0)

    # Stream in fixed-size slabs to bound peak memory (ceil division).
    num_chunks = (n_rows + chunk_size - 1) // chunk_size
    for i, start in enumerate(range(0, n_rows, chunk_size)):
        end = min(start + chunk_size, n_rows)
        dst[offset + start:offset + end] = src[start:end]
        print(f"Copied val chunk {i+1}/{num_chunks} for {key}. Chunk size: {end - start}")

# Release all HDF5 handles now that the merge is complete
# (same close order as before: test, val, then output).
for handle in (test_hdf5, val_hdf5, output_hdf5):
    handle.close()
