# coding=utf8


import pandas as pd
# import tables as tb
import copy


class Demo:

    @staticmethod
    def pd_hdfstore_doc():
        # Offline reference holder: a verbatim copy of ``help(pd.HDFStore)``
        # output (class signature, constructor parameters, dunder methods).
        # The method body is only this docstring; it returns None.
        """
class HDFStore(builtins.object)
 |  HDFStore(path, mode: str = 'a', complevel: Union[int, NoneType] = None, complib=None, fletcher32: bool = False, **kwargs)
 |
 |  Dict-like IO interface for storing pandas objects in PyTables.
 |
 |  Either Fixed or Table format.
 |
 |  .. warning::
 |
 |     Pandas uses PyTables for reading and writing HDF5 files, which allows
 |     serializing object-dtype data with pickle when using the "fixed" format.
 |     Loading pickled data received from untrusted sources can be unsafe.
 |
 |     See: https://docs.python.org/3/library/pickle.html for more.
 |
 |  Parameters
 |  ----------
 |  path : str
 |      File path to HDF5 file.
 |  mode : {'a', 'w', 'r', 'r+'}, default 'a'
 |
 |      ``'r'``
 |          Read-only; no data can be modified.
 |      ``'w'``
 |          Write; a new file is created (an existing file with the same
 |          name would be deleted).
 |      ``'a'``
 |          Append; an existing file is opened for reading and writing,
 |          and if the file does not exist it is created.
 |      ``'r+'``
 |          It is similar to ``'a'``, but the file must already exist.
 |  complevel : int, 0-9, default None
 |      Specifies a compression level for data.
 |      A value of 0 or None disables compression.
 |  complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
 |      Specifies the compression library to be used.
 |      As of v0.20.2 these additional compressors for Blosc are supported
 |      (default if no compressor specified: 'blosc:blosclz'):
 |      {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
 |       'blosc:zlib', 'blosc:zstd'}.
 |      Specifying a compression library which is not available issues
 |      a ValueError.
 |  fletcher32 : bool, default False
 |      If applying compression use the fletcher32 checksum.
 |  **kwargs
 |      These parameters will be passed to the PyTables open_file method.
 |
 |  Examples
 |  --------
 |  >>> bar = pd.DataFrame(np.random.randn(10, 4))
 |  >>> store = pd.HDFStore('syntax.h5')
 |  >>> store['foo'] = bar   # write to HDF5
 |  >>> bar = store['foo']   # retrieve
 |  >>> store.close()
 |
 |  **Create or load HDF5 file in-memory**
 |
 |  When passing the `driver` option to the PyTables open_file method through
 |  **kwargs, the HDF5 file is loaded or created in-memory and will only be
 |  written when closed:
 |
 |  >>> bar = pd.DataFrame(np.random.randn(10, 4))
 |  >>> store = pd.HDFStore('syntax.h5', driver='H5FD_CORE')
 |  >>> store['foo'] = bar
 |  >>> store.close()   # only now, data is written to disk
 |
 |  Methods defined here:
 |
 |  __contains__(self, key: str) -> bool
 |      check for existence of this key
 |      can match the exact pathname or the pathnm w/o the leading '/'
 |
 |  __delitem__(self, key: str)
 |
 |  __enter__(self)
 |
 |  __exit__(self, exc_type, exc_value, traceback)
 |
 |  __fspath__(self)
 |
 |  __getattr__(self, name: str)
 |      allow attribute access to get stores
 |
 |  __getitem__(self, key: str)
 |
 |  __init__(self, path, mode: str = 'a', complevel: Union[int, NoneType] = None, complib=None,
             fletcher32: bool = False, **kwargs)
 |      Initialize self.  See help(type(self)) for accurate signature.
 |
 |  __iter__(self)
 |
 |  __len__(self) -> int
 |
 |  __repr__(self) -> str
 |      Return repr(self).
 |
 |  __setitem__(self, key: str, value)
 |
        """

    @staticmethod
    def pd_hdfstore_doc_append(self):
        # HDFStore.append
        """
 |  append(self, key: str, value: ~FrameOrSeries, format=None, axes=None, index=True, append=True, complib=None, 
           complevel: Union[int, NoneType] = None, columns=None, min_itemsize: Union[int, Dict[str, int], 
           NoneType] = None, nan_rep=None, chunksize=None, expectedrows=None, dropna: Union[bool, NoneType] = None, 
           data_columns: Union[List[str], NoneType] = None, encoding=None, errors: str = 'strict')
 |      
        Append to Table in file. Node must already exist and be Table format.
 |
 |      Parameters
 |      ----------
 |      key : str
 |      value : {Series, DataFrame}
 |      format : 'table' is the default
 |          Format to use when storing object in HDFStore.  Value can be one of:
 |
 |          ``'table'``
 |              Table format. Write as a PyTables Table structure which may perform
 |              worse but allow more flexible operations like searching / selecting
 |              subsets of the data.
 |      append : bool, default True
 |          Append the input data to the existing.
 |      data_columns : list of columns, or True, default None
 |          List of columns to create as indexed data columns for on-disk
 |          queries, or True to use all columns. By default only the axes
 |          of the object are indexed. See `here
 |          <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
 |      min_itemsize : dict of columns that specify minimum str sizes
 |      nan_rep      : str to use as str nan representation
 |      chunksize    : size to chunk the writing
 |      expectedrows : expected TOTAL row size of this table
 |      encoding     : default None, provide an encoding for str
 |      dropna : bool, default False
 |          Do not write an ALL nan row to the store settable
 |          by the option 'io.hdf.dropna_table'.
 |
 |      Notes
 |      -----
 |      Does *not* check if data being appended overlaps with existing
 |      data in the table, so be careful
        """

    @staticmethod
    def pd_hdfscore_doc_append_to_multiple():
        # Offline reference holder: verbatim copy of
        # ``help(pd.HDFStore.append_to_multiple)``.
        # NOTE(review): the method name has a typo ('hdfscore' instead of
        # 'hdfstore'); kept unchanged so existing callers keep working.
        # append_to_multiple
        """
 |  append_to_multiple(self, d: Dict, value, selector, data_columns=None, axes=None, dropna=False, **kwargs)
 |      Append to multiple tables
 |
 |      Parameters
 |      ----------
 |      d : a dict of table_name to table_columns, None is acceptable as the
 |          values of one node (this will get all the remaining columns)
 |      value : a pandas object
 |      selector : a string that designates the indexable table; all of its
 |          columns will be designed as data_columns, unless data_columns is
 |          passed, in which case these are used
 |      data_columns : list of columns to create as data columns, or True to
 |          use all columns
 |      dropna : if evaluates to True, drop rows from all tables if any single
 |               row in each table has all NaN. Default False.
 |
 |      Notes
 |      -----
 |      axes parameter is currently not accepted
        """

    @staticmethod
    def pd_hdfstore_doc_copy():
        # Offline reference holder: verbatim copy of ``help(pd.HDFStore.copy)``.
        # HDFStore.copy
        """
 |  copy(file, mode='w', propindexes: bool = True, keys=None, complib=None, complevel: Union[int, NoneType] = None,
         fletcher32: bool = False, overwrite=True)
 |      Copy the existing store to a new file, updating in place.
 |
 |      Parameters
 |      ----------
 |      propindexes: bool, default True
 |          Restore indexes in copied file.
 |      keys       : list of keys to include in the copy (defaults to all)
 |      overwrite  : overwrite (remove and replace) existing nodes in the
 |          new store (default is True)
 |      mode, complib, complevel, fletcher32 same as in HDFStore.__init__
 |
 |      Returns
 |      -------
 |      open file handle of the new store
        """

    @staticmethod
    def pd_hdfstore_doc_create_table_index():
        # Offline reference holder: verbatim copy of
        # ``help(pd.HDFStore.create_table_index)``.
        # HDFStore.create_table_index
        """
 |  create_table_index(self, key: str, columns=None, optlevel: Union[int, NoneType] = None,
                       kind: Union[str, NoneType] = None)
 |      Create a pytables index on the table.
 |
 |      Parameters
 |      ----------
 |      key : str
 |      columns : None, bool, or listlike[str]
 |          Indicate which columns to create an index on.
 |
 |          * False : Do not create any indexes.
 |          * True : Create indexes on all columns.
 |          * None : Create indexes on all columns.
 |          * listlike : Create indexes on the given columns.
 |
 |      optlevel : int or None, default None
 |          Optimization level, if None, pytables defaults to 6.
 |      kind : str or None, default None
 |          Kind of index, if None, pytables defaults to "medium".
 |
 |      Raises
 |      ------
 |      TypeError: raises if the node is not a table
        """

    @staticmethod
    def pd_hdfstore_doc_flush():
        # Offline reference holder: verbatim copy of ``help(pd.HDFStore.flush)``.
        # HDFStore.flush
        """
   |  flush(self, fsync: bool = False)
 |      Force all buffered modifications to be written to disk.
 |
 |      Parameters
 |      ----------
 |      fsync : bool (default False)
 |        call ``os.fsync()`` on the file handle to force writing to disk.
 |
 |      Notes
 |      -----
 |      Without ``fsync=True``, flushing may not guarantee that the OS writes
 |      to disk. With fsync, the operation will block until the OS claims the
 |      file has been written; however, other caching layers may still
 |      interfere.
        """

    @staticmethod
    def pd_hdfstore_doc_get():
        # Offline reference holder: verbatim copy of ``help(pd.HDFStore.get)``.
        # HDFStore.get
        """
|  get(self, key: str)
 |      Retrieve pandas object stored in file.
 |
 |      Parameters
 |      ----------
 |      key : str
 |
 |      Returns
 |      -------
 |      object
 |          Same type as object stored in file.
        """

    @staticmethod
    def pd_hdfstore_doc_get_node():
        # Offline reference holder: verbatim help() copies for several
        # HDFStore members: get_node, get_storer, groups, info, items, keys.
        # HDFStore.get_node, get_storer
        """
 |  get_node(self, key: str) -> Union[ForwardRef('Node'), NoneType]
 |      return the node with the key or None if it does not exist
 |
 |  get_storer(self, key: str) -> Union[ForwardRef('GenericFixed'), ForwardRef('Table')]
 |      return the storer object for a key, raise if not in the file
 |  groups(self)
 |      Return a list of all the top-level nodes.
 |
 |      Each node returned is not a pandas storage object.
 |
 |      Returns
 |      -------
 |      list
 |          List of objects.
 |
 |  info(self) -> str
 |      Print detailed information on the store.
 |
 |      Returns
 |      -------
 |      str
 |
 |  items(self)
 |      iterate on key->group
 |
 |  iteritems = items(self)
|
 |  keys(self, include: str = 'pandas') -> List[str]
 |      Return a list of keys corresponding to objects stored in HDFStore.
 |
 |      Parameters
 |      ----------
 |
 |      include : str, default 'pandas'
 |              When kind equals 'pandas' return pandas objects
 |              When kind equals 'native' return native HDF5 Table objects
 |
 |              .. versionadded:: 1.1.0
 |
 |      Returns
 |      -------
 |      list
 |          List of ABSOLUTE path-names (e.g. have the leading '/').
 |
 |      Raises
 |      ------
 |      raises ValueError if kind has an illegal value
 |
    """

    @staticmethod
    def pd_hdfstore_doc_open():
        # Offline reference holder: verbatim help() copies for the remaining
        # HDFStore members: open, put, remove, select, select_as_coordinates,
        # select_as_multiple, select_column, walk, plus the data descriptors.
        """
 |  open(self, mode: str = 'a', **kwargs)
 |      Open the file in the specified mode
 |
 |      Parameters
 |      ----------
 |      mode : {'a', 'w', 'r', 'r+'}, default 'a'
 |          See HDFStore docstring or tables.open_file for info about modes
 |      **kwargs
 |          These parameters will be passed to the PyTables open_file method.
 |
 |  put(self, key: str, value: ~FrameOrSeries, format=None, index=True, append=False, complib=None, 
        complevel: Union[int, NoneType] = None, min_itemsize: Union[int, Dict[str, int], NoneType] = None, 
        nan_rep=None, data_columns: Union[List[str], NoneType] = None, encoding=None, errors: str = 'strict', 
        track_times: bool = True)
 |      
        Store object in HDFStore.
 |
 |      Parameters
 |      ----------
 |      key : str
 |      value : {Series, DataFrame}
 |      format : 'fixed(f)|table(t)', default is 'fixed'
 |          Format to use when storing object in HDFStore. Value can be one of:
 |
 |          ``'fixed'``
 |              Fixed format.  Fast writing/reading. Not-appendable, nor searchable.
 |          ``'table'``
 |              Table format.  Write as a PyTables Table structure which may perform
 |              worse but allow more flexible operations like searching / selecting
 |              subsets of the data.
 |      append   : bool, default False
 |          This will force Table format, append the input data to the
 |          existing.
 |      data_columns : list, default None
 |          List of columns to create as data columns, or True to
 |          use all columns. See `here
 |          <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
 |      encoding : str, default None
 |          Provide an encoding for strings.
 |      dropna   : bool, default False, do not write an ALL nan row to
 |          The store settable by the option 'io.hdf.dropna_table'.
 |      track_times : bool, default True
 |          Parameter is propagated to 'create_table' method of 'PyTables'.
 |          If set to False it enables to have the same h5 files (same hashes)
 |          independent on creation time.
 |
 |          .. versionadded:: 1.1.0
 |
 |  remove(self, key: str, where=None, start=None, stop=None)
 |      Remove pandas object partially by specifying the where condition
 |
 |      Parameters
 |      ----------
 |      key : string
 |          Node to remove or delete rows from
 |      where : list of Term (or convertible) objects, optional
 |      start : integer (defaults to None), row number to start selection
 |      stop  : integer (defaults to None), row number to stop selection
 |
 |      Returns
 |      -------
 |      number of rows removed (or None if not a Table)
 |
 |      Raises
 |      ------
 |      raises KeyError if key is not a valid store
 |
 |  select(self, key: str, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None,
           auto_close: bool = False)
 |      Retrieve pandas object stored in file, optionally based on where criteria.
 |
 |      .. warning::
 |
 |         Pandas uses PyTables for reading and writing HDF5 files, which allows
 |         serializing object-dtype data with pickle when using the "fixed" format.
 |         Loading pickled data received from untrusted sources can be unsafe.
 |
 |         See: https://docs.python.org/3/library/pickle.html for more.
 |
 |      Parameters
 |      ----------
 |      key : str
 |              Object being retrieved from file.
 |      where : list, default None
 |              List of Term (or convertible) objects, optional.
 |      start : int, default None
 |              Row number to start selection.
 |      stop : int, default None
 |              Row number to stop selection.
 |      columns : list, default None
 |              A list of columns that if not None, will limit the return columns.
 |      iterator : bool, default False
 |              Returns an iterator.
 |      chunksize : int, default None
 |              Number or rows to include in iteration, return an iterator.
 |      auto_close : bool, default False
 |          Should automatically close the store when finished.
 |
 |      Returns
 |      -------
 |      object
 |          Retrieved object from file.
 |
 |  select_as_coordinates(self, key: str, where=None, start: Union[int, NoneType] = None,
                          stop: Union[int, NoneType] = None)
 |      return the selection as an Index
 |
 |      .. warning::
 |
 |         Pandas uses PyTables for reading and writing HDF5 files, which allows
 |         serializing object-dtype data with pickle when using the "fixed" format.
 |         Loading pickled data received from untrusted sources can be unsafe.
 |
 |         See: https://docs.python.org/3/library/pickle.html for more.
 |
 |
 |      Parameters
 |      ----------
 |      key : str
 |      where : list of Term (or convertible) objects, optional
 |      start : integer (defaults to None), row number to start selection
 |      stop  : integer (defaults to None), row number to stop selection
 |
 |  select_as_multiple(self, keys, where=None, selector=None, columns=None, start=None, stop=None, iterator=False, chunksize=None, auto_close: bool = False)
 |      Retrieve pandas objects from multiple tables.
 |
 |      .. warning::
 |
 |         Pandas uses PyTables for reading and writing HDF5 files, which allows
 |         serializing object-dtype data with pickle when using the "fixed" format.
 |         Loading pickled data received from untrusted sources can be unsafe.
 |
 |         See: https://docs.python.org/3/library/pickle.html for more.
 |
 |      Parameters
 |      ----------
 |      keys : a list of the tables
 |      selector : the table to apply the where criteria (defaults to keys[0]
 |          if not supplied)
 |      columns : the columns I want back
 |      start : integer (defaults to None), row number to start selection
 |      stop  : integer (defaults to None), row number to stop selection
 |      iterator : boolean, return an iterator, default False
 |      chunksize : nrows to include in iteration, return an iterator
 |      auto_close : bool, default False
 |          Should automatically close the store when finished.
 |
 |      Raises
 |      ------
 |      raises KeyError if keys or selector is not found or keys is empty
 |      raises TypeError if keys is not a list or tuple
 |      raises ValueError if the tables are not ALL THE SAME DIMENSIONS
 |
 |  select_column(self, key: str, column: str, start: Union[int, NoneType] = None, stop: Union[int, NoneType] = None)
 |      return a single column from the table. This is generally only useful to
 |      select an indexable
 |
 |      .. warning::
 |
 |         Pandas uses PyTables for reading and writing HDF5 files, which allows
 |         serializing object-dtype data with pickle when using the "fixed" format.
 |         Loading pickled data received from untrusted sources can be unsafe.
 |
 |         See: https://docs.python.org/3/library/pickle.html for more.
 |
 |      Parameters
 |      ----------
 |      key : str
 |      column : str
 |          The column of interest.
 |      start : int or None, default None
 |      stop : int or None, default None
 |
 |      Raises
 |      ------
 |      raises KeyError if the column is not found (or key is not a valid
 |          store)
 |      raises ValueError if the column can not be extracted individually (it
 |          is part of a data block)
 |
 |  walk(self, where='/')
 |      Walk the pytables group hierarchy for pandas objects.
 |
 |      This generator will yield the group path, subgroups and pandas object
 |      names for each group.
 |
 |      Any non-pandas PyTables objects that are not a group will be ignored.
 |
 |      The `where` group itself is listed first (preorder), then each of its
 |      child groups (following an alphanumerical order) is also traversed,
 |      following the same procedure.
 |
 |      .. versionadded:: 0.24.0
 |
 |      Parameters
 |      ----------
 |      where : str, default "/"
 |          Group where to start walking.
 |
 |      Yields
 |      ------
 |      path : str
 |          Full path to a group (without trailing '/').
 |      groups : list
 |          Names (strings) of the groups contained in `path`.
 |      leaves : list
 |          Names (strings) of the pandas objects contained in `path`.
 |
 |  ----------------------------------------------------------------------
 |  Data descriptors defined here:
 |
 |  __dict__
 |      dictionary for instance variables (if defined)
 |
 |  __weakref__
 |      list of weak references to the object (if defined)
 |
 |  filename
 |
 |  is_open
 |      return a boolean indicating whether the file is open
 |
 |  root
 |      return the root node
 |
 |  ----------------------------------------------------------------------
 |  Data and other attributes defined here:
 |
 |  __annotations__ = {'_complevel': <class 'int'>, '_fletcher32': <class ...
        """
        return

    @staticmethod
    def show_hdfstore(hdf5file='demo_pandas_hdfstore.h5'):
        try:
            hf = pd.HDFStore(hdf5file, 'r')
            print(f"""
                Groups:
                {hf.groups()}
                {'-'*30}
                Description:
                {hf.root.df.table.description}        
                {'-'*30}
                {hf['df'][:]}
            """)
        except Exception as e:
            print(e)
            hf.close()

    @staticmethod
    def pd_hdfstore(hdf5file='demo_pandas_hdfstore.h5'):
        """Walk-through demo of the pandas HDFStore API.

        Builds a small Series and DataFrame, writes them to *hdf5file* in
        both 'fixed' and 'table' formats, then demonstrates traversal
        (keys/walk), appending, querying (query/select), modification,
        removal, and finally a round-trip via ``read_hdf``/``to_hdf``.
        Each step's result is printed as an annotated transcript.

        :param hdf5file: path of the HDF5 file to (re)create for the demo.
        :return: None
        """
        faker_names = ['程柳青', '窦建军', '张梦想']
        sr = pd.Series(data=faker_names,
                       index=pd.date_range(start=pd.Timestamp('2010-1-1'), freq=pd.offsets.Day(1), periods=3))
        df = pd.DataFrame(
            data={'id_numstr': ['{:03d}'.format(j) for j in range(3)],
                  'name_cnstr': faker_names,
                  'math_int': [100, 90, 92],
                  'art_float': [77.5, 98.2, 87.77],
                  'birth_datetime': [pd.Timestamp('2001-3-5'), pd.Timestamp('1998-2-1'), pd.Timestamp('2002-12-15')],
                  'pass_bool': [True, False, True]
                  }
        )

        # remove old hdf5file so the demo always starts from a fresh store
        import os
        if os.path.isfile(hdf5file):
            os.remove(hdf5file)

        # *** fixed format ***
        # --- write group/dataset
        h5store = pd.HDFStore(hdf5file)

        # group: /
        h5store.put('sr', sr, format='table')
        h5store.append('sr', sr)                    # only tables allowed to append data
        h5store.put('df', df, format='table', min_itemsize=2)   # min string-column width; TODO confirm effect of a bare int here
        h5store.append('df', df)
        print(f"{h5store.root.df.table}")

        # group: /group1, /group2, /group5/group51/group511
        h5store.put('group1/sr', sr)
        h5store['group2/df'] = df
        # h5store.append('/group1/sr', sr)          # fixed not allowed to append data
        h5store['group5/group51/group511/df'] = df  # multi-hierarchical groups

        # --- traverse
        keys = h5store.keys()
        walks_list = []
        for group, subgroups, objects in h5store.walk():
            walks_list.append("{}, {}, {}".format(group, subgroups, objects))
        # walks = '\n'.join(walks_list)
        # print(walks)

        h5store.close()
        # *** fixed close ***

        # *** table format ***
        # -- open with mode 'a'
        # NOTE(review): hard-coded filename — presumably should reuse the
        # ``hdf5file`` parameter; verify before changing.
        h5store = pd.HDFStore('demo_pandas_hdfstore.h5', mode='a')

        # --- create by append mode, format is 'table'
        h5store.append('group3/sr', sr)
        group3_sr_type = h5store.root.group3.sr._v_attrs.pandas_type

        # --- create by put method, set format to 'table'
        h5store.put(key='group4/df', value=df, format='table')
        # print(h5store.root.group4.df._v_attrs.pandas_type)
        # return

        # --- append data again
        h5store.append('/group3/sr', sr)
        sr_append = copy.copy(h5store['group3/sr'])
        h5store.append('group4/df', df)
        h5store.flush()
        # print(h5store.root.group4.df._v_attrs.pandas_type)

        # --- query
        dfq2 = h5store['group2/df'].query('index==1')
        dfq4 = h5store['group4/df'].query('name_cnstr=="程柳青"')
        dfselect4 = h5store.select('group4/df', where=["index>1 & columns in ['id_numstr']"])

        # --- modify
        # NOTE(review): if anything below raises, the handler closes the
        # store but execution still falls through to ``h5store.remove`` and
        # the big print below, which would then fail on a closed store.
        try:
            h5store['group2/df'] = h5store['group2/df'].drop(labels=['pass_bool'], axis=1)
            h5store.flush()
            df2m = h5store['group2/df']
            # dfnew = h5store['group4/df'].drop(labels=['art_float'], axis=1)
            h5store.remove('group4/df')
            h5store.put('group4/df', df, format='table', data_columns=True)
            h5store.append('group4/df', df)
            h5store.flush()
            df4m = h5store['group4/df'].drop(labels=[0, 2], axis=0)
        except Exception:
            print('fail')
            h5store.close()

        # --- remove method
        h5store.remove(key='group3/sr')

        # *** output text ***
        print(
            f"""
# pandas 数据集
 >>> sr
{sr}
 >>> df
{df}
# 以缺省方式存储到HDF5，使用的是fixed格式（设置在pandas.io.hdf.default_format）
 >>> h5store = pd.HDFStore(hdf5file)
 >>> h5store.put('group1/sr', sr)                   # 使用put方法创建，缺省格式为series
 >>> h5store['group2/df'] = df                      # 直接赋值创建, 缺省格式为frame
 >>> h5store.put('sr', sr, format='table')          # 使用put创建table格式数据集
 >>> h5store.put('df', df, format='table')          # 使用put创建table格式数据集
 >>> h5store['group5/group51/group511/df'] = df     # 在多层目录中直接创建数据集

# 查看存储格式
 >>> print(h5store.root.group1.sr._v_attrs.pandas_type)
{h5store.root.group1.sr._v_attrs.pandas_type}
 >>> print(h5store.root.group2.df._v_attrs.pandas_type)
{h5store.root.group2.df._v_attrs.pandas_type}
 >>> print(h5store.root.sr._v_attrs.pandas_type)
{h5store.root.sr._v_attrs.pandas_type}
 >>> print(h5store.root.df._v_attrs.pandas_type)
{h5store.root.df._v_attrs.pandas_type}

# 查看键值，列出HDF5中数据对象情况
 >>> h5store.keys()
{keys}

# 遍历整个文件, 查看存储对象情况
 >>> for group, subgroups, objects in h5store.walk():
 ...     print(group, subgroups, objects)   # 各组的组名、子组名列表、对象名列表
{walks_list[0]}             # 根组
{walks_list[1]}                                     # group1组
{walks_list[2]}                                     # group2组
{walks_list[3]}                                 # group5组
{walks_list[4]}                         # group5/group51组
{walks_list[5]}                     # group5/group51/group511组

# 添加数据
# -- fixed格式数据集不允许添加数据
 >>> h5store.append('group1/sr', sr)
ValueError: Can only append to Tables

# -- table格式数据集允许添加数据
 >>> h5store.append('sr', sr)
 >>> h5store['sr']
{h5store['sr']}

# 关闭HDFStore或使用flush()方法，保证保存数据到磁盘文件中
 >>> h5store.close()

# 以table模式存储到HDF5，效率有所降低，可以支持添加数据和where提取数据
# --- open with mode 'a'
 >>> h5store = pd.HDFStore('demo_pandas_hdfstore.h5', mode='a')

# 以append模式创建HDF5数据集, 缺省模式为table格式
 >>> h5store.append('group3/sr', sr)
 >>> h5store.append('group4/df', df)
 >>> print(h5store.root.group3.sr._v_attrs.pandas_type)
{group3_sr_type}
 >>> print(h5store.root.group4.df._v_attrs.pandas_type)
{h5store.root.group4.df._v_attrs.pandas_type}
 >>> h5store.close()

# 以append模式向已有数据集添加数据
 >>> h5store.append('/group3/sr', sr)
# 使用put方法生成table格式数据集
 >>> h5store.put(key='group4/df', value=df, format='table')

# 两次添加的数据情况
 >>> print(h5store['group3/sr'][:])
{sr_append[:]}
 >>> print(h5store['group4/df'][:])
{h5store['group4/df'][:]}

# 查询HDF5数据集内容
 >>> h5store['group2/df'].query('index==1')
{dfq2}

 >>> h5store['group4/df'].query('name_cnstr=="程柳青"')
{dfq4}

 >>> h5store.select('group4/df', where=["index > 1 & columns in ['id_numstr']"])
{dfselect4}

 >>> h5store.select('group4/df', where="math_int > 90")
{h5store.select('group4/df', where="math_int > 90")[:]}

# 修改HDF5数据集内容
 >>> h5store['group2/df'] = h5store['group2/df'].drop(labels=['pass_bool'], axis=1)
 >>> h5store.flush()
 >>> h5store['group2/df']
{df2m}

 >>> h5store['group4/df'] = h5store['group4/df'].drop(labels=[0, 2], axis=0)
 >>> h5store.flush()
 >>> h5store['group4/df']
{df4m}

# 删除HDFStore数据集
 >>> h5store.remove(key='/group3/sr')
# 删除后不能再引用
 >>> h5store['group3/sr']
KeyError: 'No object named group3/sr in the file'
            """
        )

        h5store.close()

        # read HDF5 with read_hdf
        dfread = pd.read_hdf('demo_pandas_hdfstore.h5', 'df')

        # write HDF5 with to_hdf
        dfread.to_hdf('demo_pandas_tohdf.h5', key='dfwrite')

        # show data in HDF5 store stored by write method
        # Demo.show_hdfstore('demo_pandas_tohdf.h5')
        # h5store = pd.HDFStore('demo_pandas_tohdf.h5')
        dfwrite = pd.read_hdf('demo_pandas_tohdf.h5', 'dfwrite')
        # h5store.close()

        print(
f"""
# 使用read_hdf读取HDF5数据
 >>> dfread = pd.read_hdf('demo_pandas_hdfstore.h5', 'df')
 >>> dfread
{dfread[:]}

# 使用to_hdf写HDF5数据
 >>> dfread.to_hdf('demo_pandas_tohdf.h5', 'dfwrite')
 >>> dfwrite = pd.read_hdf('demo_pandas_tohdf.h5', 'dfwrite')
 >>> dfwrite
{dfwrite[:]}
"""
        )


        # select() requires the dataset to be stored in 'table' format, with
        # min_itemsize set to reserve string width.  (Translated from the
        # original Chinese comment.)
        # demo_append_store = 'demo_pandas_hdfstore_append.h5'
        # if os.path.isfile(demo_append_store):
        #     os.remove(demo_append_store)
        # try:
        #     h5store = pd.HDFStore(demo_append_store, 'a')
        #     h5store.append('df', df, min_itemsize={'values': 2}, format='t', data_columns=True)
        #     print(h5store.select('df', where="math_int < 95 | columns in ['math_int']")[:])
        #     print(h5store['df'])
        # except Exception as e:
        #     print(e)
        #     h5store.close()

        return

    @staticmethod
    def pd_read_hdf(hdf5file='demo_pandas_hdfstore.h5'):
        """
        Demo: read two datasets back from *hdf5file* with ``pd.read_hdf``
        and print them.

        :param hdf5file: HDF5 store previously written by ``pd_hdfstore``.
        :return: None

        A verbatim copy of ``help(pd.read_hdf)`` follows:

        read_hdf(path_or_buf, key=None, mode: str = 'r', errors: str = 'strict', where=None,
                 start: Union[int, NoneType] = None, stop: Union[int, NoneType] = None, columns=None,
                 iterator=False, chunksize: Union[int, NoneType] = None, **kwargs)
            Read from the store, close it if we opened it.

            Retrieve pandas object stored in file, optionally based on where
            criteria.

            .. warning::

               Pandas uses PyTables for reading and writing HDF5 files, which allows
               serializing object-dtype data with pickle when using the "fixed" format.
               Loading pickled data received from untrusted sources can be unsafe.

               See: https://docs.python.org/3/library/pickle.html for more.

            Parameters
            ----------
            path_or_buf : str, path object, pandas.HDFStore or file-like object
                Any valid string path is acceptable. The string could be a URL. Valid
                URL schemes include http, ftp, s3, and file. For file URLs, a host is
                expected. A local file could be: ``file://localhost/path/to/table.h5``.

                If you want to pass in a path object, pandas accepts any
                ``os.PathLike``.

                Alternatively, pandas accepts an open :class:`pandas.HDFStore` object.

                By file-like object, we refer to objects with a ``read()`` method,
                such as a file handle (e.g. via builtin ``open`` function)
                or ``StringIO``.
            key : object, optional
                The group identifier in the store. Can be omitted if the HDF file
                contains a single pandas object.
            mode : {'r', 'r+', 'a'}, default 'r'
                Mode to use when opening the file. Ignored if path_or_buf is a
                :class:`pandas.HDFStore`. Default is 'r'.
            errors : str, default 'strict'
                Specifies how encoding and decoding errors are to be handled.
                See the errors argument for :func:`open` for a full list
                of options.
            where : list, optional
                A list of Term (or convertible) objects.
            start : int, optional
                Row number to start selection.
            stop  : int, optional
                Row number to stop selection.
            columns : list, optional
                A list of columns names to return.
            iterator : bool, optional
                Return an iterator object.
            chunksize : int, optional
                Number of rows to include in an iteration when using an iterator.
            **kwargs
                Additional keyword arguments passed to HDFStore.

            Returns
            -------
            item : object
                The selected object. Return type depends on the object stored.

            See Also
            --------
            DataFrame.to_hdf : Write a HDF file from a DataFrame.
            HDFStore : Low-level access to HDF files.

            Examples
            --------
            >>> df = pd.DataFrame([[1, 1.0, 'a']], columns=['x', 'y', 'z'])
            >>> df.to_hdf('./store.h5', 'data')
            >>> reread = pd.read_hdf('./store.h5')
        """

        sr = pd.read_hdf(hdf5file, key='group1/sr')
        df = pd.read_hdf(hdf5file, key='group2/df')

        print(
            f"""
{sr}
{df}
            """
        )
        pass

    @staticmethod
    def df_to_hdf(hdf5file='demo_pandas_write.h5'):
        """
        Demo of writing pandas objects to an HDF5 file via ``to_hdf``.

        Writes a sample Series under key 'group1/sr' and a sample DataFrame
        under key 'group2/df' -- the same keys the read demo in this class
        loads -- recreating *hdf5file* from scratch.

        NOTE: requires the optional PyTables ('tables') package at runtime.

        Captured ``help(DataFrame.to_hdf)`` kept below for reference:

        to_hdf(self, path_or_buf, key: 'str', mode: 'str' = 'a', complevel: 'Optional[int]' = None,
               complib: 'Optional[str]' = None, append: 'bool_t' = False, format: 'Optional[str]' = None,
               index: 'bool_t' = True, min_itemsize: 'Optional[Union[int, Dict[str, int]]]' = None,
               nan_rep=None, dropna: 'Optional[bool_t]' = None,
               data_columns: 'Optional[Union[bool_t, List[str]]]' = None, errors: 'str' = 'strict',
               encoding: 'str' = 'UTF-8') -> 'None'

            Write the contained data to an HDF5 file using HDFStore.

            Hierarchical Data Format (HDF) is self-describing, allowing an
            application to interpret the structure and contents of a file with
            no outside information. One HDF file can hold a mix of related objects
            which can be accessed as a group or as individual objects.

            In order to add another DataFrame or Series to an existing HDF file
            please use append mode and a different key.

            For more information see the :obj:`user guide <io.hdf5>`.

            Parameters
            ----------
            path_or_buf : str or pandas.HDFStore
                File path or HDFStore object.
            key : str
                Identifier for the group in the store.
            mode : {'a', 'w', 'r+'}, default 'a'
                Mode to open file:

                - 'w': write, a new file is created (an existing file with
                  the same name would be deleted).
                - 'a': append, an existing file is opened for reading and
                  writing, and if the file does not exist it is created.
                - 'r+': similar to 'a', but the file must already exist.
            complevel : {0-9}, optional
                Specifies a compression level for data.
                A value of 0 disables compression.
            complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
                Specifies the compression library to be used.
                As of v0.20.2 these additional compressors for Blosc are supported
                (default if no compressor specified: 'blosc:blosclz'):
                {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
                'blosc:zlib', 'blosc:zstd'}.
                Specifying a compression library which is not available issues
                a ValueError.
            append : bool, default False
                For Table formats, append the input data to the existing.
            format : {'fixed', 'table', None}, default 'fixed'
                Possible values:

                - 'fixed': Fixed format. Fast writing/reading. Not-appendable,
                  nor searchable.
                - 'table': Table format. Write as a PyTables Table structure
                  which may perform worse but allow more flexible operations
                  like searching / selecting subsets of the data.
                - If None, pd.get_option('io.hdf.default_format') is checked,
                  followed by fallback to "fixed"
            errors : str, default 'strict'
                Specifies how encoding and decoding errors are to be handled.
                See the errors argument for :func:`open` for a full list
                of options.
            encoding : str, default "UTF-8"
            min_itemsize : dict or int, optional
                Map column names to minimum string sizes for columns.
            nan_rep : Any, optional
                How to represent null values as str.
                Not allowed with append=True.
            data_columns : list of columns or True, optional
                List of columns to create as indexed data columns for on-disk
                queries, or True to use all columns. By default only the axes
                of the object are indexed. See :obj:`io.hdf5-query-data-columns`.
                Applicable only to format='table'.

            See Also
            --------
            DataFrame.read_hdf : Read from HDF file.
            DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
            DataFrame.to_sql : Write to a sql table.
            DataFrame.to_feather : Write out feather-format for DataFrames.
            DataFrame.to_csv : Write out to a csv file.

            Examples
            --------
            >>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
            ...                   index=['a', 'b', 'c'])
            >>> df.to_hdf('data.h5', key='df', mode='w')

            We can add another object to the same file:

            >>> s = pd.Series([1, 2, 3, 4])
            >>> s.to_hdf('data.h5', key='s')

            Reading from HDF file:

            >>> pd.read_hdf('data.h5', 'df')
            A  B
            a  1  4
            b  2  5
            c  3  6
            >>> pd.read_hdf('data.h5', 's')
            0    1
            1    2
            2    3
            3    4
            dtype: int64

            Deleting file with data:

            >>> import os
            >>> os.remove('data.h5')

        :param hdf5file: path of the HDF5 file to (re)create.
        :return: None
        """

        # Sample objects mirroring the Examples section above.
        sr = pd.Series([1, 2, 3, 4])
        df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
                          index=['a', 'b', 'c'])

        # 'w' recreates the file; the second call appends (default mode 'a'),
        # so both objects land in one file under the keys the read demo uses.
        sr.to_hdf(hdf5file, key='group1/sr', mode='w')
        df.to_hdf(hdf5file, key='group2/df')


class Demo2:
    """Reference notes for :class:`pandas.HDFStore` (captured ``help`` output)."""

    def pd_hdfstore(self):
        """
        Captured ``help(pd.HDFStore)`` output, kept here as reference notes.

class HDFStore(builtins.object)
 |  HDFStore(path, mode: str = 'a', complevel: Union[int, NoneType] = None, complib=None, fletcher32: bool = False, **kwargs)
 |
 |  Dict-like IO interface for storing pandas objects in PyTables.
 |
 |  Either Fixed or Table format.
 |
 |  .. warning::
 |
 |     Pandas uses PyTables for reading and writing HDF5 files, which allows
 |     serializing object-dtype data with pickle when using the "fixed" format.
 |     Loading pickled data received from untrusted sources can be unsafe.
 |
 |     See: https://docs.python.org/3/library/pickle.html for more.
 |
 |  Parameters
 |  ----------
 |  path : str
 |      File path to HDF5 file.
 |  mode : {'a', 'w', 'r', 'r+'}, default 'a'
 |
 |      ``'r'``
 |          Read-only; no data can be modified.
 |      ``'w'``
 |          Write; a new file is created (an existing file with the same
 |          name would be deleted).
 |      ``'a'``
 |          Append; an existing file is opened for reading and writing,
 |          and if the file does not exist it is created.
 |      ``'r+'``
 |          It is similar to ``'a'``, but the file must already exist.
 |  complevel : int, 0-9, default None
 |      Specifies a compression level for data.
 |      A value of 0 or None disables compression.
 |  complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
 |      Specifies the compression library to be used.
 |      As of v0.20.2 these additional compressors for Blosc are supported
 |      (default if no compressor specified: 'blosc:blosclz'):
 |      {'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
 |       'blosc:zlib', 'blosc:zstd'}.
 |      Specifying a compression library which is not available issues
 |      a ValueError.
 |  fletcher32 : bool, default False
 |      If applying compression use the fletcher32 checksum.
 |  **kwargs
 |      These parameters will be passed to the PyTables open_file method.
 |
 |  Examples
 |  --------
 |  >>> bar = pd.DataFrame(np.random.randn(10, 4))
 |  >>> store = pd.HDFStore('syntax.h5')
 |  >>> store['foo'] = bar   # write to HDF5
 |  >>> bar = store['foo']   # retrieve
 |  >>> store.close()
 |
 |  **Create or load HDF5 file in-memory**
 |
 |  When passing the `driver` option to the PyTables open_file method through
 |  **kwargs, the HDF5 file is loaded or created in-memory and will only be
 |  written when closed:
 |
 |  >>> bar = pd.DataFrame(np.random.randn(10, 4))
 |  >>> store = pd.HDFStore('syntax.h5', driver='H5FD_CORE')
 |  >>> store['foo'] = bar
 |  >>> store.close()   # only now, data is written to disk
 |
 |  Methods defined here:
 |
 |  __contains__(self, key: str) -> bool
 |      check for existence of this key
 |      can match the exact pathname or the pathnm w/o the leading '/'
 |
 |  __delitem__(self, key: str)
 |
 |  __enter__(self)
 |
 |  __exit__(self, exc_type, exc_value, traceback)
 |
 |  __fspath__(self)
 |
 |  __getattr__(self, name: str)
 |      allow attribute access to get stores
 |
 |  __getitem__(self, key: str)
 |
 |  __init__(self, path, mode: str = 'a', complevel: Union[int, NoneType] = None, complib=None, fletcher32: bool = False, **kwargs)
 |      Initialize self.  See help(type(self)) for accurate signature.
 |
 |  __iter__(self)
 |
 |  __len__(self) -> int
 |
 |  __repr__(self) -> str
 |      Return repr(self).
 |
 |  __setitem__(self, key: str, value)
 |
 |  append(self, key: str, value: ~FrameOrSeries, format=None, axes=None, index=True, append=True, complib=None, complevel: Union[int, NoneType] = None, columns=None, min_itemsize: Union[int, Dict[str, int], NoneType] = None, nan_rep=None, chunksize=None, expectedrows=None, dropna: Union[bool, NoneType] = None, data_columns: Union[List[str], NoneType] = None, encoding=None, errors: str = 'strict')
 |      Append to Table in file. Node must already exist and be Table
 |      format.
 |
 |      Parameters
 |      ----------
 |      key : str
 |      value : {Series, DataFrame}
 |      format : 'table' is the default
 |          Format to use when storing object in HDFStore.  Value can be one of:
 |
 |          ``'table'``
 |              Table format. Write as a PyTables Table structure which may perform
 |              worse but allow more flexible operations like searching / selecting
 |              subsets of the data.
 |      append       : bool, default True
 |          Append the input data to the existing.
 |      data_columns : list of columns, or True, default None
 |          List of columns to create as indexed data columns for on-disk
 |          queries, or True to use all columns. By default only the axes
 |          of the object are indexed. See `here
 |          <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
 |      min_itemsize : dict of columns that specify minimum str sizes
 |      nan_rep      : str to use as str nan representation
 |      chunksize    : size to chunk the writing
 |      expectedrows : expected TOTAL row size of this table
 |      encoding     : default None, provide an encoding for str
 |      dropna : bool, default False
 |          Do not write an ALL nan row to the store settable
 |          by the option 'io.hdf.dropna_table'.
 |
 |      Notes
 |      -----
 |      Does *not* check if data being appended overlaps with existing
 |      data in the table, so be careful
 |
 |  append_to_multiple(self, d: Dict, value, selector, data_columns=None, axes=None, dropna=False, **kwargs)
 |      Append to multiple tables
 |
 |      Parameters
 |      ----------
 |      d : a dict of table_name to table_columns, None is acceptable as the
 |          values of one node (this will get all the remaining columns)
 |      value : a pandas object
 |      selector : a string that designates the indexable table; all of its
 |          columns will be designed as data_columns, unless data_columns is
 |          passed, in which case these are used
 |      data_columns : list of columns to create as data columns, or True to
 |          use all columns
 |      dropna : if evaluates to True, drop rows from all tables if any single
 |               row in each table has all NaN. Default False.
 |
 |      Notes
 |      -----
 |      axes parameter is currently not accepted
 |
 |  close(self)
 |      Close the PyTables file handle
 |
 |  copy(self, file, mode='w', propindexes: bool = True, keys=None, complib=None, complevel: Union[int, NoneType] = None, fletcher32: bool = False, overwrite=True)
 |      Copy the existing store to a new file, updating in place.
 |
 |      Parameters
 |      ----------
 |      propindexes : bool, default True
 |          Restore indexes in copied file.
 |      keys : list, optional
 |          List of keys to include in the copy (defaults to all).
 |      overwrite : bool, default True
 |          Whether to overwrite (remove and replace) existing nodes in the new store.
 |      mode, complib, complevel, fletcher32 same as in HDFStore.__init__
 |
 |      Returns
 |      -------
 |      open file handle of the new store
 |
 |  create_table_index(self, key: str, columns=None, optlevel: Union[int, NoneType] = None, kind: Union[str, NoneType] = None)
 |      Create a pytables index on the table.
 |
 |      Parameters
 |      ----------
 |      key : str
 |      columns : None, bool, or listlike[str]
 |          Indicate which columns to create an index on.
 |
 |          * False : Do not create any indexes.
 |          * True : Create indexes on all columns.
 |          * None : Create indexes on all columns.
 |          * listlike : Create indexes on the given columns.
 |
 |      optlevel : int or None, default None
 |          Optimization level, if None, pytables defaults to 6.
 |      kind : str or None, default None
 |          Kind of index, if None, pytables defaults to "medium".
 |
 |      Raises
 |      ------
 |      TypeError: raises if the node is not a table
 |
 |  flush(self, fsync: bool = False)
 |      Force all buffered modifications to be written to disk.
 |
 |      Parameters
 |      ----------
 |      fsync : bool (default False)
 |        call ``os.fsync()`` on the file handle to force writing to disk.
 |
 |      Notes
 |      -----
 |      Without ``fsync=True``, flushing may not guarantee that the OS writes
 |      to disk. With fsync, the operation will block until the OS claims the
 |      file has been written; however, other caching layers may still
 |      interfere.
 |
 |  get(self, key: str)
 |      Retrieve pandas object stored in file.
 |
 |      Parameters
 |      ----------
 |      key : str
 |
 |      Returns
 |      -------
 |      object
 |          Same type as object stored in file.
 |
 |  get_node(self, key: str) -> Union[ForwardRef('Node'), NoneType]
 |      return the node with the key or None if it does not exist
 |
 |  get_storer(self, key: str) -> Union[ForwardRef('GenericFixed'), ForwardRef('Table')]
 |      return the storer object for a key, raise if not in the file
 |
 |  groups(self)
 |      Return a list of all the top-level nodes.
 |
 |      Each node returned is not a pandas storage object.
 |
 |      Returns
 |      -------
 |      list
 |          List of objects.
 |
 |  info(self) -> str
 |      Print detailed information on the store.
 |
 |      Returns
 |      -------
 |      str
 |
 |  items(self)
 |      iterate on key->group
 |
 |  iteritems = items(self)
 |
 |  keys(self, include: str = 'pandas') -> List[str]
 |      Return a list of keys corresponding to objects stored in HDFStore.
 |
 |      Parameters
 |      ----------
 |
 |      include : str, default 'pandas'
 |              When kind equals 'pandas' return pandas objects.
 |              When kind equals 'native' return native HDF5 Table objects.
 |
 |              .. versionadded:: 1.1.0
 |
 |      Returns
 |      -------
 |      list
 |          List of ABSOLUTE path-names (e.g. have the leading '/').
 |
 |      Raises
 |      ------
 |      raises ValueError if kind has an illegal value
 |
 |  open(self, mode: str = 'a', **kwargs)
 |      Open the file in the specified mode
 |
 |      Parameters
 |      ----------
 |      mode : {'a', 'w', 'r', 'r+'}, default 'a'
 |          See HDFStore docstring or tables.open_file for info about modes
 |      **kwargs
 |          These parameters will be passed to the PyTables open_file method.
 |
 |  put(self, key: str, value: ~FrameOrSeries, format=None, index=True, append=False, complib=None, complevel: Union[int, NoneType] = None, min_itemsize: Union[int, Dict[str, int], NoneType] = None, nan_rep=None, data_columns: Union[List[str], NoneType] = None, encoding=None, errors: str = 'strict', track_times: bool = True, dropna: bool = False)
 |      Store object in HDFStore.
 |
 |      Parameters
 |      ----------
 |      key : str
 |      value : {Series, DataFrame}
 |      format : 'fixed(f)|table(t)', default is 'fixed'
 |          Format to use when storing object in HDFStore. Value can be one of:
 |
 |          ``'fixed'``
 |              Fixed format.  Fast writing/reading. Not-appendable, nor searchable.
 |          ``'table'``
 |              Table format.  Write as a PyTables Table structure which may perform
 |              worse but allow more flexible operations like searching / selecting
 |              subsets of the data.
 |      append : bool, default False
 |          This will force Table format, append the input data to the existing.
 |      data_columns : list, default None
 |          List of columns to create as data columns, or True to use all columns.
 |          See `here
 |          <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html#query-via-data-columns>`__.
 |      encoding : str, default None
 |          Provide an encoding for strings.
 |      track_times : bool, default True
 |          Parameter is propagated to 'create_table' method of 'PyTables'.
 |          If set to False it enables to have the same h5 files (same hashes)
 |          independent on creation time.
 |
 |          .. versionadded:: 1.1.0
 |
 |  remove(self, key: str, where=None, start=None, stop=None)
 |      Remove pandas object partially by specifying the where condition
 |
 |      Parameters
 |      ----------
 |      key : string
 |          Node to remove or delete rows from
 |      where : list of Term (or convertible) objects, optional
 |      start : integer (defaults to None), row number to start selection
 |      stop  : integer (defaults to None), row number to stop selection
 |
 |      Returns
 |      -------
 |      number of rows removed (or None if not a Table)
 |
 |      Raises
 |      ------
 |      raises KeyError if key is not a valid store
 |
 |  select(self, key: str, where=None, start=None, stop=None, columns=None, iterator=False, chunksize=None, auto_close: bool = False)
 |      Retrieve pandas object stored in file, optionally based on where criteria.
 |
 |      .. warning::
 |
 |         Pandas uses PyTables for reading and writing HDF5 files, which allows
 |         serializing object-dtype data with pickle when using the "fixed" format.
 |         Loading pickled data received from untrusted sources can be unsafe.
 |
 |         See: https://docs.python.org/3/library/pickle.html for more.
 |
 |      Parameters
 |      ----------
 |      key : str
 |          Object being retrieved from file.
 |      where : list or None
 |          List of Term (or convertible) objects, optional.
 |      start : int or None
 |          Row number to start selection.
 |      stop : int, default None
 |          Row number to stop selection.
 |      columns : list or None
 |          A list of columns that if not None, will limit the return columns.
 |      iterator : bool or False
 |          Returns an iterator.
 |      chunksize : int or None
 |          Number or rows to include in iteration, return an iterator.
 |      auto_close : bool or False
 |          Should automatically close the store when finished.
 |
 |      Returns
 |      -------
 |      object
 |          Retrieved object from file.
 |
 |  select_as_coordinates(self, key: str, where=None, start: Union[int, NoneType] = None, stop: Union[int, NoneType] = None)
 |      return the selection as an Index
 |

        :return:
        """

        pass

