import pickle
import h5py
import numpy as np

# Load the reference-feature dictionary from disk.
# NOTE(review): pickle.load will execute arbitrary code if the file is
# untrusted — acceptable for a locally produced file, but never point
# this at external data.
with open(r'E:\datasets\satgeoloc_dataset\reference_features.pickle', 'rb') as f:
    data = pickle.load(f)

# Split the mapping into parallel key/value lists so they can be sliced
# into shards below.  Keys and values are kept exactly as stored in the
# pickle — no encoding or dtype conversion happens here (presumably h5py
# handles str keys as variable-length strings on write; confirm).
keys = list(data.keys())
values = list(data.values())

# Target number of output files and the base number of entries per file.
# Integer division means up to (num_files - 1) leftover entries exist when
# len(keys) is not divisible by num_files; the writer loop below gives
# them all to the last file.
num_files = 10
data_per_file = len(keys) // num_files

# Write each shard to its own HDF5 file.
#
# The last shard absorbs the remainder when len(keys) is not evenly
# divisible by num_files (its end_idx is len(keys) rather than a
# multiple of data_per_file), so no separate fix-up pass is needed
# afterwards.  The previous version re-opened the last file and wrote
# the same slices back into it — that pass was redundant and has been
# removed.
for i in range(num_files):
    start_idx = i * data_per_file
    # Last shard takes everything that is left, including the remainder.
    end_idx = (i + 1) * data_per_file if i < num_files - 1 else len(keys)

    file_name = f'E:\\datasets\\satgeoloc_dataset\\reference_features_{i + 1}.h5'

    with h5py.File(file_name, 'w') as hf:
        # Two parallel datasets: one for the (string) keys, one for the
        # numeric feature values stacked into a single 2-D array.
        hf.create_dataset('keys', data=keys[start_idx:end_idx])
        hf.create_dataset('values', data=np.stack(values[start_idx:end_idx]))

    print(f'Done with file {i + 1}')