#encoding=utf8
import h5py
import os
import numpy as np


class HDF5DatasetWriter:
    """Buffered writer that streams feature rows and label rows into an HDF5 file.

    Creates two datasets in a freshly-truncated file at ``outputPath``:

    * ``"data"``   — shape ``dims``,            dtype float32 (features)
    * ``"labels"`` — shape ``(dims[0], 109)``,  dtype int     (label rows)

    Samples are accumulated in an in-memory buffer and written to disk in
    chunks of ``bufSize``, avoiding one HDF5 write per sample.
    """

    def __init__(self, dims=None, outputPath='./train.hdf5', bufSize=1000):
        """
        :param dims: shape of the "data" dataset; ``dims[0]`` is the total
            number of samples.  Defaults to ``[169246, 160, 64]``, the full
            dataset size used by the accompanying conversion script.
        :param outputPath: path of the HDF5 file to create.  NOTE: mode "w"
            silently truncates an existing file at this path.
        :param bufSize: number of buffered samples that triggers a flush.
        """
        if dims is None:
            dims = [169246, 160, 64]
        self.db = h5py.File(outputPath, "w")
        self.data = self.db.create_dataset("data", dims, dtype="float32")
        # One 109-wide integer vector per sample — presumably a one-hot or
        # multi-label encoding; confirm against the label source files.
        self.labels = self.db.create_dataset("labels", [dims[0], 109], dtype="int")

        # In-memory buffer, flushed to disk every `bufSize` samples.
        self.bufSize = bufSize
        self.buffer = {"data": [], "labels": []}
        self.idx = 0  # next free row index in the on-disk datasets

    def add(self, rows, labels):
        """Queue one feature sample and its label row(s) for writing.

        Note the asymmetry: ``rows`` is buffered as a single item, while
        ``labels`` must be an *iterable of label rows* (e.g. an array of
        shape (1, 109)) whose rows are extended into the label buffer.
        """
        self.buffer["data"].append(rows)
        self.buffer["labels"].extend(labels)
        # Spill the buffer to disk once it reaches the configured size.
        if len(self.buffer["data"]) >= self.bufSize:
            self.flush()

    def flush(self):
        """Write all buffered samples to disk, then reset the buffer.

        A no-op when the buffer is empty — assigning an empty list into an
        empty dataset slice would raise inside h5py.
        """
        if not self.buffer["data"]:
            return
        end = self.idx + len(self.buffer["data"])
        self.data[self.idx:end] = self.buffer["data"]
        self.labels[self.idx:end] = self.buffer["labels"]
        self.idx = end
        self.buffer = {"data": [], "labels": []}

    def storeClassLabels(self, classLabels):
        """Store human-readable class names in a "label_names" dataset."""
        dt = h5py.special_dtype(vlen=str)  # variable-length string dtype
        labelSet = self.db.create_dataset("label_names", (len(classLabels),), dtype=dt)
        # Copies the values into the HDF5 dataset (no shared storage).
        labelSet[:] = classLabels

    def close(self):
        """Flush any remaining buffered samples and close the HDF5 file."""
        self.flush()  # safe on an empty buffer (see flush)
        self.db.close()


class HDF5DatasetGenerator:
    """Batch generator that streams (eegs, labels) pairs from an HDF5 file."""

    def __init__(self, dbPath, batchSize, preprocessors=None, aug=None, binarize=True, classes=2):
        """
        :param dbPath: path of the HDF5 file to read.
        :param batchSize: number of samples yielded per batch.
        :param preprocessors: stored but currently unused.
        :param aug: stored but currently unused.
        :param binarize: stored but currently unused.
        :param classes: stored but currently unused.
        """
        self.batchSize = batchSize
        self.preprocessors = preprocessors
        self.aug = aug
        self.binarize = binarize
        self.classes = classes
        # Open read-only: without an explicit mode, older h5py defaults to
        # append mode, which takes a write lock and risks modifying the file.
        self.db = h5py.File(dbPath, "r")
        self.numEEGs = self.db['labels'].shape[0]

    def generator(self, passes=np.inf):
        """Yield (eegs, labels) batches, looping ``passes`` times.

        The default ``np.inf`` loops forever (the Keras-style contract for
        fit_generator-like consumers).  The final batch of each pass may be
        smaller than ``batchSize``.
        """
        epochs = 0
        while epochs < passes:
            for i in np.arange(0, self.numEEGs, self.batchSize):
                # NOTE(review): HDF5DatasetWriter in this file stores the
                # features under the key "data", not "eegs" — confirm which
                # key the file at dbPath actually uses.
                eegs = self.db['eegs'][i: i + self.batchSize]
                labels = self.db['labels'][i: i + self.batchSize]
                yield (eegs, labels)
            epochs += 1

    def close(self):
        """Close the underlying HDF5 file."""
        self.db.close()

if __name__ == '__main__':
    # Convert every .mat file under ./dataset into a single train.hdf5.
    # Dataset split sizes, for reference:
    #   total mat : 169246
    #   train     : 135397
    #   test      : 16924
    #   val       : 16925
    index = 0
    writer = HDF5DatasetWriter(outputPath='./train.hdf5')
    try:
        for root, dirs, files in os.walk('./dataset'):
            for mat in files:
                index += 1
                if index % 1000 == 0:
                    print(index)  # coarse progress indicator
                path = os.path.join(root, mat)
                # Open read-only and close promptly: the original code
                # leaked one open handle per .mat file (~169k files would
                # exhaust the OS file-descriptor limit).
                with h5py.File(path, 'r') as data:
                    # Materialize the datasets as in-memory arrays ([:]) so
                    # the rows buffered inside the writer remain valid after
                    # this source file is closed.
                    writer.add(data['temp_data'][:], data['temp_label'][:])
    finally:
        # Flush any remaining buffered samples and close train.hdf5 even if
        # the walk fails partway through.
        writer.close()
