# coding=utf8


import h5py as h5
import numpy as np
import os


class Demo:

    @staticmethod
    def h5_file(demo_file='demo1.h5'):
        """
        Quick tour of the ``h5py.File`` API.

        ``h5py.File`` behaves like a dict of groups and datasets.  The most
        useful members (run ``help(h5py.File)`` for the full reference):

        * ``File(name, mode=None, ...)`` -- modes: ``r`` read-only (must
          exist), ``r+`` read/write (must exist), ``w`` create/truncate,
          ``w-``/``x`` create (fail if exists), ``a`` read/write or create.
        * ``create_group(name)`` / ``require_group(name)`` -- make a
          subgroup; ``require_group`` is idempotent.
        * ``create_dataset(name, shape=None, dtype=None, data=None, **kwds)``
          / ``require_dataset(name, shape, dtype, exact=False, **kwds)`` --
          make (or reopen) a dataset; keywords cover chunking, compression,
          maxshape, fill value, etc.
        * ``get(name, default=None, getclass=False, getlink=False)`` --
          dict-style lookup with class/link introspection.
        * ``copy(source, dest, ...)`` / ``move(source, dest)`` -- copy or
          rename objects inside (or across) files.
        * ``visit(func)`` / ``visititems(func)`` -- recursively call *func*
          once per link; returning non-None stops the walk and becomes the
          return value.
        * Mapping protocol (``in``, ``[]``, ``del``, ``len``, iteration),
          plus ``attrs``, ``flush()``, ``close()`` and context-manager use.

        Writes two small datasets into *demo_file* and reads them back.

        :param demo_file: name of the HDF5 file to create or extend
        :return: None
        """
        # Mode 'a' opens read/write and creates the file when it is missing,
        # so a single code path covers both the fresh and the existing file
        # (h5py 3 defaults to 'r', which fails on a nonexistent file).
        h5f = h5.File(demo_file, 'a')
        try:
            # require_group is a no-op when the group already exists.
            h5f.require_group('group1')
            h5f.require_group('group2')

            # np.random.random_integers is deprecated (removed in NumPy 2.0);
            # randint uses a half-open interval, hence the 101 upper bound.
            data1 = np.random.randint(0, 101, (5, 3))
            data2 = [1, 2, 3]
            print(data1)

            # Recreate the datasets from scratch so a previous run with a
            # different shape/dtype (e.g. demo1_create's 3x3 float16 under
            # the same name) cannot cause a broadcast or dtype clash.
            for name, payload in (('group1/data1', data1),
                                  ('group2/data2', data2)):
                if name in h5f:
                    del h5f[name]
                h5f.create_dataset(name, data=payload)
        finally:
            h5f.close()

        # Re-open read-only to prove the data survived the round trip.
        with h5.File(demo_file, 'r') as h5f:
            print(h5f['group1/data1'][:, :])
            print(h5f['group2/data2'][:])
        return

    @staticmethod
    def demo1_create(demo_file='demo_h5py.h5', mode='w'):
        """
        create HDF5 file with following content:
        root.data0
             ├-- group1/data1
             └-- group2/data2
        data1 = list(range(10))
        data1 = np.random.randn(3, 3)
        data2 = np.random.random_integers(0, 100, (3, 3))
        ------
        :param demo_file: file name to create
        :param mode: file mode to open
        :return: None
        """
        data01 = ['苹果apple', '橘子orange', '香蕉banana']                            # usable for unicode
        data02 = np.array(['apple', 'orange', 'banana'], dtype='S6')                 # only used for ascii
        # data03 = np.array(['苹果apple', '橘子orange', '香蕉banana'], dtype='U10')   # unusable in HDF5
        data1 = np.random.randn(3, 3)
        data2 = np.random.random_integers(0, 100, (3, 3))

        try:
            h5f = h5.File('demo1.h5', mode=mode)

            # variable length string type
            dstr = h5.special_dtype(vlen=str)
            h5f.create_dataset('data01', (3, ), dtype=dstr)
            h5f['data01'][0:3] = data01[0:3]
            h5f['data01'].attrs['name'] = 'fruit01'
            # print(h5f['data01'][:])

            # fixed length string length
            h5f['data02'] = data02
            h5f['data02'].attrs['name'] = 'fruit02'
            h5f['data02'].dtype
            print(data02[:])

            # Fail: use array unicode dtype in array
            # h5f['data03'] = data03
            # h5f.create_dataset('data03', data=data03, dtype=dstr)

            # Fail: use array unicode dtype in attrs
            # h5f['data02'].attrs['name'] = data03[0]
            # h5f['data03'].dtype

            # create dataset by create_dataset with (name, size, dtype), then set real data
            d1 = h5f.create_dataset('/group1/data1', (3, 3), dtype=np.float16)
            d1[:] = data1[:]

            # create dataset in subgroup directly
            h5f['/group2/data2'] = data2

        except (TypeError, ValueError, KeyError, UnicodeEncodeError) as e:
            print(e)
            h5f.close()
            return

        pstr = \
f"""
# 准备存储到HDF5的字符、浮点、整数数据
 >>> data01 = ['苹果apple', '橘子orange', '香蕉banana']               # 变长度字符型数据（str, Unicode字符串）
 >>> data02 = np.array(['apple', 'orange', 'banana'], dtype='S6')    # 固定长度字符型数据({data02.dtype}, ascii字节串)
 >>> data1 = np.random.randn(3, 3)                                   # 浮点数类型({data1.dtype})
 >>> data2 = np.random.random_integers(0, 100, (3, 3))               # 整数类型({data2.dtype})

 >>> h5f = h5.File('demo1.h5', mode=mode)

# 使用变长字符串类型存储字符类型数据
 >>> dstr = h5.special_dtype(vlen=str)
 >>> h5f.create_dataset('data01', (3, ), dtype=dstr)
 >>> h5f['data01'][0:3] = data01[0:3]
 >>> h5f['data02'].attrs['name'] = 'fruit01'
 >>> h5f['data01'][:]
{h5f['data01'][:]}
 >>> h5f['data01'].dtype
{h5f['data01'].dtype}

# 使用固定长度类型存储字符串类型数据
 >>> h5f['data02'] = data02
 >>> h5f['data02'].attrs['name'] = 'fruit02'
 >>> h5f['data02'][:]
{h5f['data02'][:]}
 >>> h5f['data02'].dtype
{h5f['data02'].dtype}       # 管道符号'|'表示不需要标识字节顺序

# 使用create_dataset建立数据集，初始化时指定维度容量以及数据类型
 >>> d1 = h5f.create_dataset('/group1/data1', (3, 3), dtype=np.float16)
 >>> d1[:] = data1[:]
{d1[:]}

# 使用带有组名的数据集名称，可以直接建立组和数据集
 >>> h5f['/group2/data2'] = data2
 >>> h5f['/group2/data2'][:]
{h5f['/group2/data2'][:]}
 >>> h5f['/group2/data2'].dtype
{h5f['/group2/data2'].dtype}
"""
        h5f.close()
        print(pstr)
        return

    @staticmethod
    def demo2_reset(demo_file='demo_h5py.h5'):
        data0 = np.array(['mongosteen', 'mongo', 'peach'], dtype='S10')
        data1 = np.random.randn(1, 9)
        data2 = np.random.random_integers(100, 1000, (3, 3))

        if not os.path.isfile(demo_file):
            print('no file found!')
        h5f = h5.File(demo_file, 'a')

        # reset data0
        h5f['data01'][0:2] = data0[0:2]
        h5f['data01'].attrs['name'] = 'Fruit Store'

        # reset group1/data1
        del h5f['/group1/data1']
        h5f.create_dataset('/group1/data1', data=data1, shape=(1, 9))

        # reset group2/data2
        h5f['/group2/data2'][:] = data2[:]

        # create new group
        h5f.require_group('group3')

        pstr = \
f"""
# 重新生成数据
 >>> data0 = np.array(['mongosteen', 'mongo', 'peach'], dtype='S10') # 改变字符类型为字节串
 >>> data1 = np.random.randn(1, 9)                                   # 改变维度
 >>> data2 = np.random.random_integers(100, 1000, (3, 3))            # 改变数据范围

# 以添加方式打开HDF5文件
 >>> h5f = h5.File(demo_file, 'a')

# 以切片赋值方式改变原数据集内容
 >>> h5f['data01'][0:2] = data0[0:2]
 >>> h5f['data01'][:]
{h5f['data01'][:]}

# 改变数据集描述属性'name'的内容
 >>> h5f['data01'].attrs['name'] = 'Fruit Store'
 >>> h5f['data01'].attrs['name']
{h5f['data01'].attrs['name']}

# 删除组对象，重新创建
 >>> del h5f['/group1/data1']
 >>> h5f['group1']
KeyError: "Unable to open object (object 'group1' doesn't exist)"
 >>> h5f.create_dataset('/group1/data1', data=data1, shape=(1, 9))
 >>> h5f['/group1/data1'][:]
{h5f['group1/data1'][:]}

# 重新赋值数据集所有数据
 >>> h5f['/group2/data2'][:] = data2[:]
 >>> h5f['/group2/data2'][:]
{h5f['/group2/data2'][:]}

# 检查组是否存在，如果不存在则进行创建新的组
 >>> h5f.require_group('group3')
 >>> h5f['group3']
{h5f['group3']}
"""
        h5f.close()

        print(pstr)
        return

    @staticmethod
    def demo3_traverse(demo_file='demo_h5py.h5'):
        # traverse as dict
        try:
            h5f = h5.File(demo_file, 'r')
            pstr1 = ''
            for item in h5f.items():
                pstr1 += str(item) + '\n'
            pstr2 = ''
            for name in h5f.keys():
                pstr2 += name + '\n'
            pstr3 = ''
            for v in h5f.values():
                pstr3 += str(v) + '\n'
        finally:
            h5f.close()

        print(
            f"""
# demo1.h5结构
root--data01
      data02
      group1/data1
      group2/data2
      group3

# 以字典方式遍历demo1.h5的根组
 >>> h5f = h5.File(demo_file, 'r')
 >>> for item in h5f.items():
 ...    print(item)
{pstr1}
 >>> for item in h5f.keys():
 ...    print(item)
{pstr2}
 >>> for item in h5f.values():
 ...    print(item)
{pstr3}
            """
            )

        try:
            # traverse by visitor
            h5f = h5.File(demo_file, 'r')

            # 需要设置一个函数，传递给visit，用于处理每个检索到的对象
            h5f.visit(lambda x: print(x))

            # 设置一个列表，使用其append方法作为函数，可以将检索项目收集到列表中
            item_list = []
            h5f.visit(item_list.append)
            # print(item_list)

            # 直接访问对象
            item_list2 = []
            fun3 = lambda x, y: item_list2.append((x, y))
            h5f.visititems(fun3)
            # print(item_list2)

            # 返回特定对象
            def fun4(key, obj):
                if obj.attrs.get('name') == 'fruit02':
                    return obj
            result = h5f.visititems(fun4)
            # print(result)

        except:
            h5f.close()

        chr_n = '\n'
        print(
            f"""

# 使用visit方式进行遍历
 >>> h5f = h5.File(demo_file, 'r')

# 需要设置一个函数，传递给visit，用于处理每个检索到的对象
 >>> fun = lambda x: print(x)
 >>> h5f.visit(fun)
{chr_n.join([str(s) for s in item_list])}

# 设置一个列表，使用其append方法作为函数，可以将检索项目收集到列表中
 >>> item_list = []
 >>> h5f.visit(item_list.append)
{item_list}

# 直接访问对象
 >>> item_list2 = []
 >>> fun3 = lambda x, y: item_list2.append((x, y))
 >>> h5f.visititems(fun3)
[{chr_n.join([str(s) for s in item_list2])}]

# 返回特定对象
 >>> def fun4(key, obj):
 ...   if obj.attrs.get('name') == 'fruit02':
 ...       return obj
 >>> result = h5f.visititems(fun4)
 >>> result
{result[:]}

 >>> h5f.close()
            """
        )
        h5f.close()

        return

    @staticmethod
    def demo4_visit_by_type(demo_file='demo_h5py.h5'):
        """
        Walk *demo_file* recursively and report its contents by type:
        first print every Dataset object, then list each object tagged
        with whether it is a Group.

        :param demo_file: HDF5 file to inspect (opened read-only)
        :return: None
        """
        h5f = h5.File(demo_file, 'r')
        try:
            # Show only the datasets; visititems keeps walking as long as
            # the callback returns None (print does).
            print("Datasets in {}:".format(demo_file))

            def show_dataset(name, obj):
                if isinstance(obj, h5.Dataset):
                    print(obj)

            h5f.visititems(show_dataset)

            # Tag every visited object with an is-it-a-group flag.
            print("objects in {}:".format(demo_file))
            group_list = []

            def collect(name, obj):
                group_list.append((isinstance(obj, h5.Group), obj))

            h5f.visititems(collect)
            # Objects are stringified while the file is still open.
            print('\n'.join(str(entry) for entry in group_list))
        finally:
            h5f.close()
        return
