# coding: utf-8


import pandas as pd
import numpy as np


class Demo:

    def __init__(self):
        """Prepare the demo data: a private copy of the sample DataFrame ``df5``."""
        from ch4data import sec0_datasets as sample_data
        # Copy so the demos can mutate scores without touching the shared dataset.
        self.df = sample_data.DataSet.df5.copy()

    def st_ag_methods(self):
        """Demonstrate DataFrame statistics/aggregation methods on sample scores.

        Methods exercised (see the pandas API reference for full signatures):

        - ``mean`` / ``median``            : central tendency per axis
        - ``mode(axis, numeric_only, dropna)`` : most frequent value(s); may be
          several per column, hence a DataFrame result
        - ``max`` / ``min``                : extrema (``idxmax``/``idxmin`` give the index)
        - ``std(..., ddof=1)``             : sample standard deviation (divisor N - ddof)
        - ``count(axis, level, numeric_only)`` : non-NA cell counts
        - ``sum`` / ``prod(..., min_count=0)`` : ``min_count`` controls the all-NA/empty
          result (sum defaults to 0, prod to 1)
        - ``mad`` / ``skew`` / ``kurt``    : mean absolute deviation and shape statistics

        Most of these accept ``level=`` (pandas 1.x behaviour) to aggregate one
        level of a MultiIndex, collapsing the result along that level.
        """
        df = self.df.copy()
        # Seed duplicated and extreme scores so mode, skew and kurt have
        # something visible to report.
        df.iat[0, 0] = 70
        df.iat[3, 0] = 70
        df.iat[0, 1] = 75
        df.iat[3, 1] = 75
        df.iat[1, 0] = 99
        df.iat[5, 0] = 95
        df.iat[5, 1] = 100
        df.iat[10, 0] = 10
        df.iat[11, 1] = 5
        # Derived columns: a categorical pass/fail flag and a grade band.
        # None marks the boundary scores 70 and 74, so NaN handling shows up
        # in the aggregations as well.
        df.loc[:, 'pass'] = df.math.apply(lambda x: 'yes' if x > 70 else 'no')
        df.loc[:, 'grade'] = df.math.apply(lambda x: 1 if x < 70 else
                                           (None if x in [70, 74] else (2 if x < 80 else 3)))
        df['pass'] = pd.Categorical(df['pass'])

        # df.info() writes to stdout and returns None; calling it inside the
        # f-string (as the original did) printed a stray "None" line.
        df.info()
        print(
            f" >>> df\n"
            f"{df}\n"

            f"{'-'*80}\n"
            f"# 计算均值\n"
            f"{df.mean()}\n"

            f"# 计算层次1的均值\n"
            f" >>> df.mean(level=1)\n"
            f"{df.mean(level=1)}\n"

            f"{'-'*80}\n"
            f"# 计算均值，沿列向计算，不排除NaN值, 计算结果进行一次行索引转置\n"
            f" >>> df.mean(axis=1, skipna=False).unstack()\n"
            f"{df.mean(axis=1, skipna=False).unstack()}\n"

            f"{'-'*80}\n"
            f"# 计算样本数，可针对更多数值类型\n"
            f" >>> df.count(level=1)\n"
            f"{df.count(level=1)}\n"

            f"{'-' * 80}\n"
            f"# 计算中位数\n"
            f" >>> df.median(level=1)\n"
            f"{df.median(level=1)}\n"

            f"{'-' * 80}\n"
            f"# 计算平均绝对偏差\n"
            f" >>> df.mad(level=0)\n"
            f"{df.mad(level=0)}\n"

            f"{'-' * 80}\n"
            f"# 计算分布的偏度\n"
            f" >>> df.skew(level=0)\n"
            f"{df.skew(level=0)}\n"

            f"{'-' * 80}\n"
            f"# 计算分布的峰度\n"
            f" >>> df.kurt(level=0)\n"
            f"{df.kurt(level=0)}\n"

            f"{'-' * 80}\n"
        )

        return

    def st_describe(self):
        """Demonstrate ``DataFrame.describe`` on a frame of mixed dtypes.

        ``describe(percentiles=None, include=None, exclude=None,
        datetime_is_numeric=False)`` produces summary statistics:

        - numeric columns -> count, mean, std, min, 25%, 50%, 75%, max
        - object / categorical columns -> count, unique, top, freq
        - ``include`` / ``exclude`` whitelist or blacklist dtypes: ``'all'``,
          a list-like of dtypes (``np.number``, ``object``, ``'category'``,
          ...), or None (default: numeric only for mixed frames)
        - ``datetime_is_numeric=True`` (pandas >= 1.1) treats datetime columns
          numerically instead of enumerating them as categories

        Returns
        -------
        pandas.DataFrame
            The demo frame, so callers can experiment further.
        """
        import datetime as dt
        dtt = lambda x: dt.datetime(*x)
        df = pd.DataFrame(
            {'name': ['Li', 'He', 'Wu', 'Si', 'Mi'],
             'age': [25, 33, 25, 19, 20],
             'high': [180, 178, 181, 170, 190],
             'birth': map(dtt, [(2000, 2, 3), (1992, 3, 5), (2000, 2, 3), (2006, 3, 9), (2005, 9, 1)]),
             'lang': pd.Categorical(['Chinese', 'English', 'English', 'Spain', 'Chinese'])
             },
        )
        # astype returns a NEW frame; the original discarded the result, so
        # age/high silently stayed int64.  Rebind so the downcast applies.
        df = df.astype({'age': np.int8, 'high': np.int8})
        print(
            f" >>> df\n"
            f"{df}\n"
        )
        # info() writes to stdout and returns None; keep it out of the f-string
        # to avoid printing a stray "None" line.
        df.info()
        print(
            f"{'-' * 100}\n"
            f"# 缺省情况下对数字类型的列进行统计\n"
            " >>> df.describe()\n"
            f"{df.describe()}\n"
            f"{'-' * 100}\n"
            f"# 对所有数据类型进行统计\n"
            f" >>> df.describe(include='all')\n"
            f"{df.describe(include='all')}\n"
            f"{'-' * 100}\n"
            f"# 指定数据类型进行统计\n"
            # Label now shows the balanced quotes the call actually uses.
            " >>> df.describe(include={np.integer, 'category'})\n"
            f"{df.describe(include={np.integer, 'category'})}\n"
            f"{'-' * 100}\n"
            "# 排除指定数据类型进行统计\n"
            # np.object was only an alias of the builtin `object` and was
            # deprecated in numpy 1.20 / removed in 1.24; use `object` directly.
            " >>> df.describe(exclude={object})\n"
            f"{df.describe(exclude={object})}\n"
            f" 注意：将日期作为datetime类型，统计时使用枚举方式，未来会被废弃.\n"
            f"FutureWarning: Treating datetime data as categorical rather than numeric in `.describe` \n"
            f"is deprecated and will be removed in a future version of pandas. Specify `datetime_is_\n"
            f"numeric=True` to silence this warning and adopt the future behavior now.\n"
            f"{'-' * 100}\n"
            f"# Pandas建议使用数字方式对日期时间类型进行统计\n"
            f" >>> df.describe(datetime_is_numeric=True)\n"
            f"{df.describe(datetime_is_numeric=True)}"
        )

        return df

    def st_cum_rank(self):
        """
        rank(self: ~FrameOrSeries, axis=0, method: str = 'average', numeric_only: Union[bool, NoneType] = None,
             na_option: str = 'keep', ascending: bool = True, pct: bool = False)
            -> ~FrameOrSeries

            Compute numerical data ranks (1 through n) along axis.

            By default, equal values are assigned a rank that is the average of the
            ranks of those values.

            Parameters
            ----------
            axis : {0 or 'index', 1 or 'columns'}, default 0
                Index to direct ranking.
            method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
                How to rank the group of records that have the same value (i.e. ties):

                * average: average rank of the group
                * min: lowest rank in the group
                * max: highest rank in the group
                * first: ranks assigned in order they appear in the array
                * dense: like 'min', but rank always increases by 1 between groups.

            numeric_only : bool, optional
                For DataFrame objects, rank only numeric columns if set to True.
            na_option : {'keep', 'top', 'bottom'}, default 'keep'
                How to rank NaN values:
                * keep: assign NaN rank to NaN values
                * top: assign smallest rank to NaN values if ascending
                * bottom: assign highest rank to NaN values if ascending.

            ascending : bool, default True
                Whether or not the elements should be ranked in ascending order.
            pct : bool, default False
                Whether or not to display the returned rankings in percentile
                form.

            Returns
            -------
            same type as caller
                Return a Series or DataFrame with data ranks as values.

            See Also
            --------
            core.groupby.GroupBy.rank : Rank of values within each group.

            Examples
            --------
            >>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
            ...                                    'spider', 'snake'],
            ...                         'Number_legs': [4, 2, 4, 8, np.nan]})
            >>> df
                Animal  Number_legs
            0      cat          4.0
            1  penguin          2.0
            2      dog          4.0
            3   spider          8.0
            4    snake          NaN

            The following example shows how the method behaves with the above
            parameters:

            * default_rank: this is the default behaviour obtained without using
              any parameter.
            * max_rank: setting ``method = 'max'`` the records that have the
              same values are ranked using the highest rank (e.g.: since 'cat'
              and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
            * NA_bottom: choosing ``na_option = 'bottom'``, if there are records
              with NaN values they are placed at the bottom of the ranking.
            * pct_rank: when setting ``pct = True``, the ranking is expressed as
              percentile rank.

            >>> df['default_rank'] = df['Number_legs'].rank()
            >>> df['max_rank'] = df['Number_legs'].rank(method='max')
            >>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
            >>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
            >>> df
                Animal  Number_legs  default_rank  max_rank  NA_bottom  pct_rank
            0      cat          4.0           2.5       3.0        2.5     0.625
            1  penguin          2.0           1.0       1.0        1.0     0.250
            2      dog          4.0           2.5       3.0        2.5     0.625
            3   spider          8.0           4.0       4.0        4.0     1.000
            4    snake          NaN           NaN       NaN        5.0       NaN

        """

        import random
        day_offset = pd.offsets.DateOffset(days=1)

        print('=== DataSet Exmaple:')
        df = pd.DataFrame(
            data={'level': [1, 1, 1, 3, 3, 1, 4],
                  'point': [2, 2, 3, None, 3, 3, 5],
                  'name': ['Mike', 'Jack', 'Rice', 'Mike', 'Jack', 'Jack', 'Lee'],
                  'grade': pd.Categorical(['H', 'H', 'M', 'L', 'M', 'L', 'M'], categories=['H', 'M', 'L']),
                  'date': [pd.Timestamp(2010, 1, 1)+day_offset*random.randint(1, 7) for _ in range(7)]
                  },
            index=['y' + str(j) for j in range(7)])
        # df.loc[:, 'name'] = ['Mike', 'Jack', 'Rice', 'Mike', 'Jack', 'Jack', 'Lee']

        # print("rank(axis=0, method='average', numeric_only=None, na_option='keep', ascending=True, pct=False)")
        print(
            f" >>> df\n"
            f"{df}\n"
            f"{df.info()}\n"

            f"{'-' * 80}\n"
            "# 按照缺省设置进行排名计算\n"
            "#     axis=0: 沿行方向进行计算\n"
            "#     method='average': 使用组内平均值作为重复排名名次\n"
            "#     numeric_only=None: 计算所有数据类型排名\n"
            "#     na_option='keep': 保持NaN值不变\n"
            "#     ascending=True: 使用升序方式排序\n"
            "#     pct=False: 不使用百分位排名\n"
            "# 对枚举类型grade，按照其枚举类型值顺序('H'<'M'<'L')进行排名计算\n"
            " >>> df.rank()\n"
            f"{df.rank()}\n"

            f"{'-' * 80}\n"
            "# 针对有重复名次情况，使用稠密计算方式排名\n"
            "# 第一组内使用最小名次1，各组依次递增，重复名次会视为一个名次，不增长名次\n"
            " >>> df.rank(method='dense')\n"
            f"{df.rank(method='dense')}\n"

            f"{'-' * 80}\n"
            "# 仅对数字类型数据进行排名\n"
            " >>> df.rank(numeric_only=True)\n"
            f"{df.rank(numeric_only=True)}\n"

            f"{'-' * 80}\n"
            f"# 设置na_option，对NaN值, 使用最顶端名次(升序时为名次1)\n"
            " >>> df.rank(na_option='top')\n"
            f"{df.rank(na_option='top')}\n"

            f"{'-' * 80}\n"
            "# 沿列向进行排名计算，排除了与数字类型不兼容类型\n"
            " >>> df.rank(axis=1)\n"
            f"{df.rank(axis=1)}\n"
            f"# 计算排名时，枚举类型与字符类型可以兼容\n"
            f" >>> df[['name', 'grade']].rank(axis=1)\n"
            f"{df[['name', 'grade']].rank(axis=1)}\n"
        )

        return df

    def st_cum_quantile(self):
        """Demonstrate ``quantile`` on Series and DataFrame.

        Series signature::

            quantile(q=0.5, interpolation='linear')

        DataFrame signature::

            quantile(q=0.5, axis=0, numeric_only=True, interpolation='linear')

        ``q`` may be a float (returning a scalar / Series) or an array-like
        (returning a Series / DataFrame indexed by ``q``).  When the desired
        quantile falls between two data points ``i`` and ``j``,
        ``interpolation`` selects the result:

        * linear: ``i + (j - i) * fraction``
        * lower: ``i``
        * higher: ``j``
        * nearest: ``i`` or ``j``, whichever is nearest
        * midpoint: ``(i + j) / 2``

        For a DataFrame, ``numeric_only=False`` additionally computes the
        quantile of datetime and timedelta columns.

        Returns
        -------
        None
        """
        sr = pd.Series([1, 3, 5, 7, 9])
        df = pd.DataFrame({'a': [1, 3, 5, 7, 9], 'b': [2, 4, 6, 8, 10], 'c': [4, 6, 8, 10, 12],
                           'd': pd.date_range('2010-1-1', freq='1d', periods=5)})

        print(
            f" >>> sr\n"
            f"{sr}\n"
            f"# 计算比例0.3的分位数\n"
            f"# 计算过程：pos=1 + (5-1)*0.3 = 2.2\n"
            f"#          fraction = 0.2\n"
            f"#          quantile(0.3) = 3 + (5-3)*0.2 = 3.4\n"
            f" >>> sr.quantile(0.3)\n"
            f"{'{:.4f}'.format(sr.quantile(0.3))}\n"

            f"{'-' * 80}\n"
            f"# 计算四分位数\n"
            f" >>> sr.quantile([0.25, 0.5, 0.75])\n"
            # BUG FIX: the executed call previously included an extra q=1,
            # which did not match the command shown above.
            f"{sr.quantile([0.25, 0.5, 0.75])}\n"

            f"{'-' * 80}\n"
            # BUG FIX: this comment line was missing its trailing newline and
            # merged with the next one in the output.
            f"# 使用多种方式计算分位数，\n"
            f"# nearest、lower、higher三种方式会取原数据序列中的数据值\n"
            f"# linear、midpoint使用插值方式计算，如果位置不是整数，则不是原数据序列中的数据值\n"
            f"# 如果位置1+(n-1)*q为整数，则直接使用对应位置的序列中数据值\n"
            f"# 使用linear方式(缺省)计算分位数\n"
            f" >>> sr.quantile([0.2, 0.5, 0.8])\n"
            f"{sr.quantile([0.2, 0.5, 0.8])}\n"

            f"# 使用lower方式计算分位数\n"
            f" >>> sr.quantile([0.2, 0.5, 0.8], interpolation='lower')\n"
            f"{sr.quantile([0.2, 0.5, 0.8], interpolation='lower')}\n"

            f"# 使用higher方式计算分位数\n"
            f" >>> sr.quantile([0.2, 0.5, 0.8], interpolation='higher')\n"
            f"{sr.quantile([0.2, 0.5, 0.8], interpolation='higher')}\n"

            f"# 使用midpoint方式计算分位数\n"
            f" >>> sr.quantile([0.2, 0.5, 0.8], interpolation='midpoint')\n"
            f"{sr.quantile([0.2, 0.5, 0.8], interpolation='midpoint')}\n"

            f"{'-' * 80}\n"
            f"# 计算DataFrame数据集分位数\n"
            " >>> df\n"
            f"{df}\n"

            f"{'-' * 80}\n"
            f"# 使用默认设置numeric_only=True， 只计算数字类型数据列\n"
            # BUG FIX: the displayed commands below previously showed q=0.1
            # while the executed calls used q=0.2; they now agree.
            f" >>> df.quantile([0.2, 0.5, 0.8], interpolation='higher')\n"
            f"{df.quantile([0.2, 0.5, 0.8], interpolation='higher')}\n"

            f"{'-' * 80}\n"
            f"# 设置numeric_only=False, 时间日期类型参与计算\n"
            f" >>> df.quantile([0.2, 0.5, 0.8], interpolation='higher', numeric_only=False)\n"
            f"{df.quantile([0.2, 0.5, 0.8], interpolation='higher', numeric_only=False)}\n"
        )

        return

    def st_cum_cumxxx(self):
        """Demonstrate the cumulative aggregation family of a DataFrame.

        Covers ``cumsum``, ``cumprod``, ``cummax`` and ``cummin``, which all
        share the signature ``cumxxx(axis=None, skipna=True, *args, **kwargs)``:

        * ``axis`` -- 0/'index' (the default, equivalent to ``None``)
          accumulates down the rows; 1/'columns' accumulates across the
          columns.
        * ``skipna`` -- NA values are skipped by default; with
          ``skipna=False`` every value from the first NaN onward becomes NaN.

        Returns
        -------
        pandas.DataFrame
            The sample frame used for the demonstration.
        """
        frame = pd.DataFrame(
            {'t1': [1, 5, 0, 9], 't2': [1, 6, None, 5], 't3': [3, 7, -2, 1], 't4': [0, 2, 7, 9]},
            index=pd.RangeIndex(1, 5))

        divider = f"{'-'*80}\n"

        # Assemble the report as a list of fragments and emit it in a single
        # print call, exactly as one concatenated string.
        pieces = [
            f" >>> df\n",
            f"{frame}\n",
            divider,
            f"# 使用缺省方式进行累计计算：沿行索引方向，忽略NaN值\n",
        ]

        # The four cumulative operations differ only in name, so demonstrate
        # them in a loop.
        for label, op in (('求和', 'cumsum'), ('乘积', 'cumprod'),
                          ('最大值', 'cummax'), ('最小值', 'cummin')):
            pieces.append(f"# --- 累计{label}\n")
            pieces.append(f" >>> df.{op}()\n")
            pieces.append(f"{getattr(frame, op)()}\n")

        pieces += [
            divider,
            f"# 沿列向进行累计计算，忽略NaN值\n",
            f" >>> df.cumsum(axis=1)\n",
            f"{frame.cumsum(axis=1)}\n",
            divider,
            f"# 沿行索引方向，不忽略NaN值进行计算\n",
            f" >>> df.cumsum(axis=0, skipna=False)\n",
            f"{frame.cumsum(axis=0, skipna=False)}",
        ]
        print(''.join(pieces))

        return frame

    def st_gp_rolling(self):
        """
        rolling(self, window, min_periods=None, center=False, win_type=None, on=None, axis=0, closed=None)

            Provide rolling window calculations.

            Parameters
            ----------
            window : int, offset, or BaseIndexer subclass
                Size of the moving window. This is the number of observations used for
                calculating the statistic. Each window will be a fixed size.

                If its an offset then this will be the time period of each window. Each
                window will be a variable sized based on the observations included in
                the time-period. This is only valid for datetimelike indexes.

                If a BaseIndexer subclass is passed, calculates the window boundaries
                based on the defined ``get_window_bounds`` method. Additional rolling
                keyword arguments, namely `min_periods`, `center`, and
                `closed` will be passed to `get_window_bounds`.
            min_periods : int, default None
                Minimum number of observations in window required to have a value
                (otherwise result is NA). For a window that is specified by an offset,
                `min_periods` will default to 1. Otherwise, `min_periods` will default
                to the size of the window.
            center : bool, default False
                Set the labels at the center of the window.
            win_type : str, default None
                Provide a window type. If ``None``, all points are evenly weighted.
                See the notes below for further information.
            on : str, optional
                For a DataFrame, a datetime-like column or MultiIndex level on which
                to calculate the rolling window, rather than the DataFrame's index.
                Provided integer column is ignored and excluded from result since
                an integer index is not used to calculate the rolling window.
            axis : int or str, default 0
            closed : str, default None
                Make the interval closed on the 'right', 'left', 'both' or
                'neither' endpoints.
                For offset-based windows, it defaults to 'right'.
                For fixed windows, defaults to 'both'. Remaining cases not implemented
                for fixed windows.

            Returns
            -------
            a Window or Rolling sub-classed for the particular operation

            See Also
            --------
            expanding : Provides expanding transformations.
            ewm : Provides exponential weighted functions.

            Notes
            -----
            By default, the result is set to the right edge of the window. This can be
            changed to the center of the window by setting ``center=True``.

            To learn more about the offsets & frequency strings, please see `this link
            <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.

            The recognized win_types are:

            * ``boxcar``
            * ``triang``
            * ``blackman``
            * ``hamming``
            * ``bartlett``
            * ``parzen``
            * ``bohman``
            * ``blackmanharris``
            * ``nuttall``
            * ``barthann``
            * ``kaiser`` (needs parameter: beta)
            * ``gaussian`` (needs parameter: std)
            * ``general_gaussian`` (needs parameters: power, width)
            * ``slepian`` (needs parameter: width)
            * ``exponential`` (needs parameter: tau), center is set to None.

            If ``win_type=None`` all points are evenly weighted. To learn more about
            different window types see `scipy.signal window functions
            <https://docs.scipy.org/doc/scipy/reference/signal.html#window-functions>`__.

            Certain window types require additional parameters to be passed. Please see
            the third example below on how to add the additional parameters.

            Examples
            --------
            >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
            >>> df
                 B
            0  0.0
            1  1.0
            2  2.0
            3  NaN
            4  4.0

            Rolling sum with a window length of 2, using the 'triang'
            window type.

            >>> df.rolling(2, win_type='triang').sum()
                 B
            0  NaN
            1  0.5
            2  1.5
            3  NaN
            4  NaN

            Rolling sum with a window length of 2, using the 'gaussian'
            window type (note how we need to specify std).

            >>> df.rolling(2, win_type='gaussian').sum(std=3)
                      B
            0       NaN
            1  0.986207
            2  2.958621
            3       NaN
            4       NaN

            Rolling sum with a window length of 2, min_periods defaults
            to the window length.

            >>> df.rolling(2).sum()
                 B
            0  NaN
            1  1.0
            2  3.0
            3  NaN
            4  NaN

            Same as above, but explicitly set the min_periods
            >>> df.rolling(2, min_periods=1).sum()
                 B
            0  0.0
            1  1.0
            2  3.0
            3  2.0
            4  4.0

            Same as above, but with forward-looking windows

            >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
            >>> df.rolling(window=indexer, min_periods=1).sum()
                 B
            0  1.0
            1  3.0
            2  2.0
            3  4.0
            4  4.0

            A ragged (meaning not-a-regular frequency), time-indexed DataFrame

            >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]},
            ...                   index = [pd.Timestamp('20130101 09:00:00'),
            ...                            pd.Timestamp('20130101 09:00:02'),
            ...                            pd.Timestamp('20130101 09:00:03'),
            ...                            pd.Timestamp('20130101 09:00:05'),
            ...                            pd.Timestamp('20130101 09:00:06')])

            >>> df
                                   B
            2013-01-01 09:00:00  0.0
            2013-01-01 09:00:02  1.0
            2013-01-01 09:00:03  2.0
            2013-01-01 09:00:05  NaN
            2013-01-01 09:00:06  4.0

            Contrasting to an integer rolling window, this will roll a variable
            length window corresponding to the time period.
            The default for min_periods is 1.

            >>> df.rolling('2s').sum()
                                   B
            2013-01-01 09:00:00  0.0
            2013-01-01 09:00:02  1.0
            2013-01-01 09:00:03  3.0
            2013-01-01 09:00:05  NaN
            2013-01-01 09:00:06  4.0

        """

        # FixedForwardWindowIndexer
        """
        class FixedForwardWindowIndexer(BaseIndexer)
         |  FixedForwardWindowIndexer(index_array: Union[numpy.ndarray, NoneType] = None, window_size: int = 0, **kwargs)
         |
         |  Creates window boundaries for fixed-length windows that include the
         |  current row.
         |
         |  Examples
         |  --------
         |  >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
         |  >>> df
         |       B
         |  0  0.0
         |  1  1.0
         |  2  2.0
         |  3  NaN
         |  4  4.0
         |
         |  >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
         |  >>> df.rolling(window=indexer, min_periods=1).sum()
         |       B
         |  0  1.0
         |  1  3.0
         |  2  2.0
         |  3  4.0
         |  4  4.0
         |
         |  Method resolution order:
         |      FixedForwardWindowIndexer
         |      BaseIndexer
         |      builtins.object
         |
         |  Methods defined here:
         |
         |  get_window_bounds(self, num_values: int = 0, min_periods: Union[int, NoneType] = None, center: Union[bool, NoneType] = None, closed: Union[str, NoneType] = None) -> Tuple[numpy.ndarr
        ay, numpy.ndarray]
         |      Computes the bounds of a window.
         |
         |      Parameters
         |      ----------
         |      num_values : int, default 0
         |          number of values that will be aggregated over
         |      window_size : int, default 0
         |          the number of rows in a window
         |      min_periods : int, default None
         |          min_periods passed from the top level rolling API
         |      center : bool, default None
         |          center passed from the top level rolling API
         |      closed : str, default None
         |          closed passed from the top level rolling API
         |      win_type : str, default None
         |          win_type passed from the top level rolling API
         |
         |      Returns
         |      -------
         |      A tuple of ndarray[int64]s, indicating the boundaries of each
         |      window
         |
         |  ----------------------------------------------------------------------
         |  Methods inherited from BaseIndexer:
         |
         |  __init__(self, index_array: Union[numpy.ndarray, NoneType] = None, window_size: int = 0, **kwargs)
         |      Parameters
         |      ----------
         |      **kwargs :
         |          keyword arguments that will be available when get_window_bounds is called
         |
         |  ----------------------------------------------------------------------
         |  Data descriptors inherited from BaseIndexer:
         |
         |  __dict__
         |      dictionary for instance variables (if defined)
         |
         |  __weakref__
         |      list of weak references to the object (if defined)
        """

        # VariableOffsetWindowIndexer
        """
        class VariableOffsetWindowIndexer(BaseIndexer)
         |  VariableOffsetWindowIndexer(index_array: Union[numpy.ndarray, NoneType] = None, 
                                        window_size: int = 0, 
                                        index=None, 
                                        offset=None, 
                                        **kwargs)
         |
         |  Calculate window boundaries based on a non-fixed offset such as a BusinessDay
         |
         |  Method resolution order:
         |      VariableOffsetWindowIndexer
         |      BaseIndexer
         |      builtins.object
         |
         |  Methods defined here:
         |
         |  __init__(self, index_array: Union[numpy.ndarray, NoneType] = None, window_size: int = 0, index=None, 
                     offset=None, **kwargs)
         |      Parameters
         |      ----------
         |      **kwargs :
         |          keyword arguments that will be available when get_window_bounds is called
         |
         |  get_window_bounds(self, num_values: int = 0, min_periods: Union[int, NoneType] = None, 
                              center: Union[bool, NoneType] = None, closed: Union[str, NoneType] = None)
                -> Tuple[numpy.ndarray, numpy.ndarray]
         |      
                Computes the bounds of a window.
         |
         |      Parameters
         |      ----------
         |      num_values : int, default 0
         |          number of values that will be aggregated over
         |      window_size : int, default 0
         |          the number of rows in a window
         |      min_periods : int, default None
         |          min_periods passed from the top level rolling API
         |      center : bool, default None
         |          center passed from the top level rolling API
         |      closed : str, default None
         |          closed passed from the top level rolling API
         |      win_type : str, default None
         |          win_type passed from the top level rolling API
         |
         |      Returns
         |      -------
         |      A tuple of ndarray[int64]s, indicating the boundaries of each window
         |
         |  ----------------------------------------------------------------------
         |  Data descriptors inherited from BaseIndexer:
         |
         |  __dict__
         |      dictionary for instance variables (if defined)
         |
         |  __weakref__
         |      list of weak references to the object (if defined)
        """

        # from pandas.tseries.offsets import CustomBusinessDay
        # from pandas.tseries.holiday import USFederalHolidayCalendar
        #
        # us_holiday_days = CustomBusinessDay(calendar=USFederalHolidayCalendar())
        #
        # df0 = pd.DataFrame({'value': np.arange(9)},
        #                    index=pd.date_range('2015-12-24', periods=9, freq=us_holiday_days))

        from pandas.tseries.offsets import DateOffset
        from pandas.tseries.offsets import Day

        index_date = pd.date_range('1/1/2020', periods=7, freq='2d')
        df = pd.DataFrame({'step': range(7),
                           'level': [1, 2, None, 3, 4, 5, 1],
                           'price': [1000, 2000, 3000, 4000, 5000, 6000, 0],
                           'class': pd.Categorical(['one', 'two', 'one', None, 'two', 'one', 'three']),
                           # 'wday': ['weekday-'+str(_d.isoweekday()) for _d in index_date],
                           'date': [pd.Timestamp('2010-1-1 00:00:01') + DateOffset(seconds=i*3 if i < 4 else i*4)
                                    for i in range(7)]
                           },
                          index=index_date,
                          )
        # FixedForwardWindowIndexer
        indexer_ff2 = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
        # VariableOffsetWindowIndexer
        indexer_vow = pd.api.indexers.VariableOffsetWindowIndexer(index=df.index, offset=pd.offsets.BDay(3))

        # note: 对于一些Pandas版本，数据集中不能含有非数字数据列，否则触发异常IndexError: list assignment index out of range
        print(
            f" >>> df\n"
            f"{df}\n"

            f"{'-'*80}\n"
            f"# 设定滚动窗口长度为3， 沿索引计算聚会函数\n"
            f"# 使用min_periods缺省设置None，窗口内数据最少为3个，少于3个则计算结果为NaN"
            f"# 使用center缺省设置False，窗口在当前数据位置上方\n"
            f" >>> df.rolling(window=3).mean()\n"
            f"{df.rolling(window=3).mean()}\n"
            f"# 设置窗口的中心点在当前数据位置\n"
            f" >>> df.rolling(window=3, center=True).mean()\n"
            f"{df.rolling(window=3, center=True).mean()}\n"

            f"{'-'*80}\n"
            f"# 设置on=date, 使用日期时间偏移量作为窗口滚动计算\n"
            f"# 注意：只能使用时间中的确定长度量作为偏移量，如天（days）,秒（seconds），\n"
            f"       有些时间单位被视为不确定量，不能使用，如month（每月的天数是不固定的）\n"
            
            f"# --- 沿索引使用日期时间偏移量4天长度滑动窗口\n"
            f" >>> from pd.tseries.offsets import Day\n"
            f" >>> df.rolling(window=Day(4)).sum()\n"
            f"{df.rolling(window=Day(4)).sum()}\n"
            # f"{df.rolling(window=DateOffset(days=4)).sum()}\n"
            # ValueError: <DateOffset: days=4> is a non-fixed frequency
            
            f"# --- 沿date列使用5秒长度滑动窗口\n"
            f" >>> df.rolling('5s', on='date').sum()\n"
            f"{df.rolling(window='5s', on='date').sum()}\n"
            # f"{df.rolling(window=Minute(1), on='date').sum()}\n"

            f"{'-'*80}\n"
            f"# 设置沿列class进行滚动计算\n"
            f" >>> df.rolling(window=3, on='date', min_periods=2).sum()\n"
            f"{df.rolling(window=3, on='date', min_periods=2).sum()}\n"

            f"{'-'*80}\n"
            f"# 使用agg设置多个聚合函数，不允许设置on\n"
            f" >>> df.rolling(window=3, on=None, min_periods=1).agg([np.mean, np.sum])\n"
            f"{df.rolling(window=3, on=None, min_periods=1).agg([np.mean, np.sum])}\n"

            f"{'-' * 80}\n"
            f"# 使用BaseIndex子类设置固定窗口, 窗口方向改为向前延申\n"
            f" >>> pd.api.indexers.FixedForwardWindowIndexer(window_size=2)\n"
            f" >>> df.rolling(window=indexer_ff2).sum()\n"
            f"{df.rolling(window=indexer_ff2).sum()}\n"

            f"{'-' * 80}\n"
            f"# 使用BaseIndex子类设置可变窗口\n"
            f" >>> indexer_vow = pd.api.indexers.VariableOffsetWindowIndexer(index=df.index, offset=pd.offsets.BDay(3))\n"
            f" >>> df.rolling(window=indexer_vow).sum()\n"
            f"{df.rolling(window=indexer_vow).sum()}\n"
            f"{[_d.isoweekday() for _d in df.index]}\n"
            f"{df}"
        )

        return

    def st_gp_expanding(self) -> pd.DataFrame:
        """
        expanding(self, min_periods: 'int' = 1, center: 'Optional[bool_t]' = None, axis: 'Axis' = 0)
            -> 'Expanding'

            Provide expanding transformations.

            Parameters
            ----------
            min_periods : int, default 1
                Minimum number of observations in window required to have a value
                (otherwise result is NA).
            center : bool, default False
                Set the labels at the center of the window.
            axis : int or str, default 0

            Returns
            -------
            a Window sub-classed for the particular operation

            See Also
            --------
            rolling : Provides rolling window calculations.
            ewm : Provides exponential weighted functions.

            Notes
            -----
            By default, the result is set to the right edge of the window. This can be
            changed to the center of the window by setting ``center=True``.

            Examples
            --------
            >>> df = pd.DataFrame({"B": [0, 1, 2, np.nan, 4]})
            >>> df
                 B
            0  0.0
            1  1.0
            2  2.0
            3  NaN
            4  4.0

            >>> df.expanding(2).sum()
                 B
            0  NaN
            1  1.0
            2  3.0
            3  3.0
            4  7.0
        """

        # Demo frame: four all-numeric columns over a default RangeIndex, so every
        # expanding aggregation below is well-defined.
        df = pd.DataFrame(
            {'num': range(1, 8),
             'price': [1000, 2000, 3000, 4000, 5000, 6000, 7000],
             'cost': [100, 200, 1000, 2000, 3000, 4000, 100],
             'sales': [10, 12, 30, 41, 15, 16, 22]
             }
            )
        # Small all-integer frame; only referenced by the commented-out
        # axis=1 experiment near the bottom of this method.
        df1 = pd.DataFrame([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]],
                           columns=["col1", "col2", "col3", "col4"])
        # Single print of concatenated parts: each ' >>> ' line shows the command
        # that the f-string expression on the following line actually evaluates.
        # The Chinese section headers are runtime output and are left untouched.
        # NOTE(review): df.expanding(2, axis=1) below is deprecated from
        # pandas 2.1 and removed later — confirm the pandas version this
        # tutorial targets.
        print(
            f" >>> df\n"
            f"{df}\n"

            f"{'-'*80}\n"
            f"# 窗口中最小数据量为1，等同于cum方法\n"
            f" >>> df.expanding(1).sum()\n"
            f"{df.expanding(1).sum()}\n"

            f"{'-'*80}\n"
            f"# 窗口中最小数据量为2，第一行为NaN\n"
            f" >>> df.expanding(2).sum()\n"
            f"{df.expanding(2).sum()}\n"

            # f"{'-'*80}\n"
            # f"# 窗口中最小数据量为3，窗口中心设为当前位置（该参数未来版本将去除）\n"
            # f" >>> df.expanding(3, center=True).sum()\n"
            # f"{df.expanding(3, center=True).sum()}\n"

            f"{'-'*80}\n"
            f"# 窗口中最小数据量为2，沿轴向1进行计算，NaN值累计\n"
            f" >>> df.expanding(2, axis=1).sum()\n"
            f"{df.expanding(2, axis=1).sum()}\n"

            f"{'-' * 80}\n"
        )

        # print(f"{df1.expanding(2, axis=1).sum()}")

        # Return the demo frame so callers can inspect the input data.
        return df

    def st_gp_groupby(self):
        """Demonstrate DataFrame/Series groupby usage; returns the demo frames.

        groupby(self, by=None, axis=0, level=None, as_index: bool = True, sort: bool = True, group_keys: bool = True,
                squeeze: bool = <object object at 0x00000255B1F4E680>, observed: bool = False, dropna: bool = True)
                -> 'DataFrameGroupBy'

            Group DataFrame using a mapper or by a Series of columns.

            A groupby operation involves some combination of splitting the
            object, applying a function, and combining the results. This can be
            used to group large amounts of data and compute operations on these
            groups.

            Parameters
            ----------
            by : mapping, function, label, or list of labels
                Used to determine the groups for the groupby.
                If ``by`` is a function, it's called on each value of the object's
                index. If a dict or Series is passed, the Series or dict VALUES
                will be used to determine the groups (the Series' values are first
                aligned; see ``.align()`` method). If an ndarray is passed, the
                values are used as-is determine the groups. A label or list of
                labels may be passed to group by the columns in ``self``. Notice
                that a tuple is interpreted as a (single) key.
            axis : {0 or 'index', 1 or 'columns'}, default 0
                Split along rows (0) or columns (1).
            level : int, level name, or sequence of such, default None
                If the axis is a MultiIndex (hierarchical), group by a particular
                level or levels.
            as_index : bool, default True
                For aggregated output, return object with group labels as the
                index. Only relevant for DataFrame input. as_index=False is
                effectively "SQL-style" grouped output.
            sort : bool, default True
                Sort group keys. Get better performance by turning this off.
                Note this does not influence the order of observations within each
                group. Groupby preserves the order of rows within each group.
            group_keys : bool, default True
                When calling apply, add group keys to index to identify pieces.
            squeeze : bool, default False
                Reduce the dimensionality of the return type if possible,
                otherwise return a consistent type.

                .. deprecated:: 1.1.0

            observed : bool, default False
                This only applies if any of the gp are Categoricals.
                If True: only show observed values for categorical gp.
                If False: show all values for categorical gp.

                .. versionadded:: 0.23.0
            dropna : bool, default True
                If True, and if group keys contain NA values, NA values together
                with row/column will be dropped.
                If False, NA values will also be treated as the key in groups

                .. versionadded:: 1.1.0

            Returns
            -------
            DataFrameGroupBy
                Returns a groupby object that contains information about the groups.

            See Also
            --------
            resample : Convenience method for frequency conversion and resampling
                of time series.

            Notes
            -----
            See the `user guide
            <https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.

            Examples
            --------
            >>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
            ...                               'Parrot', 'Parrot'],
            ...                    'Max Speed': [380., 370., 24., 26.]})
            >>> df
               Animal  Max Speed
            0  Falcon      380.0
            1  Falcon      370.0
            2  Parrot       24.0
            3  Parrot       26.0
            >>> df.groupby(['Animal']).mean()
                    Max Speed
            Animal
            Falcon      375.0
            Parrot       25.0

            **Hierarchical Indexes**

            We can groupby different levels of a hierarchical index
            using the `level` parameter:

            >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
            ...           ['Captive', 'Wild', 'Captive', 'Wild']]
            >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
            >>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
            ...                   index=index)
            >>> df
                            Max Speed
            Animal Type
            Falcon Captive      390.0
                   Wild         350.0
            Parrot Captive       30.0
                   Wild          20.0
            >>> df.groupby(level=0).mean()
                    Max Speed
            Animal
            Falcon      370.0
            Parrot       25.0
            >>> df.groupby(level="Type").mean()
                     Max Speed
            Type
            Captive      210.0
            Wild         185.0

            We can also choose to include NA in group keys or not by setting
            `dropna` parameter, the default setting is `True`:

            >>> l = [[1, 2, 3], [1, None, 4], [2, 1, 3], [1, 2, 2]]
            >>> df = pd.DataFrame(l, columns=["a", "b", "c"])

            >>> df.groupby(by=["b"]).sum()
                a   c
            b
            1.0 2   3
            2.0 2   5

            >>> df.groupby(by=["b"], dropna=False).sum()
                a   c
            b
            1.0 2   3
            2.0 2   5
            NaN 1   4

            >>> l = [["a", 12, 12], [None, 12.3, 33.], ["b", 12.3, 123], ["a", 1, 1]]
            >>> df = pd.DataFrame(l, columns=["a", "b", "c"])

            >>> df.groupby(by="a").sum()
                b     c
            a
            a   13.0   13.0
            b   12.3  123.0

            >>> df.groupby(by="a", dropna=False).sum()
                b     c
            a
            a   13.0   13.0
            b   12.3  123.0
            NaN 12.3   33.0
        """

        # groupby for Series
        """
        groupby(self, by=None, axis=0, level=None, as_index: bool = True, sort: bool = True, group_keys: bool = True,
                squeeze: bool = <object object at 0x00000255B1F4E680>, observed: bool = False, dropna: bool = True)
                 -> 'SeriesGroupBy'

            Group Series using a mapper or by a Series of columns.

            A groupby operation involves some combination of splitting the
            object, applying a function, and combining the results. This can be
            used to group large amounts of data and compute operations on these
            groups.

            Parameters
            ----------
            by : mapping, function, label, or list of labels
                Used to determine the groups for the groupby.
                If ``by`` is a function, it's called on each value of the object's
                index. If a dict or Series is passed, the Series or dict VALUES
                will be used to determine the groups (the Series' values are first
                aligned; see ``.align()`` method). If an ndarray is passed, the
                values are used as-is determine the groups. A label or list of
                labels may be passed to group by the columns in ``self``. Notice
                that a tuple is interpreted as a (single) key.
            axis : {0 or 'index', 1 or 'columns'}, default 0
                Split along rows (0) or columns (1).
            level : int, level name, or sequence of such, default None
                If the axis is a MultiIndex (hierarchical), group by a particular
                level or levels.
            as_index : bool, default True
                For aggregated output, return object with group labels as the
                index. Only relevant for DataFrame input. as_index=False is
                effectively "SQL-style" grouped output.
            sort : bool, default True
                Sort group keys. Get better performance by turning this off.
                Note this does not influence the order of observations within each
                group. Groupby preserves the order of rows within each group.
            group_keys : bool, default True
                When calling apply, add group keys to index to identify pieces.
            squeeze : bool, default False
                Reduce the dimensionality of the return type if possible,
                otherwise return a consistent type.

                .. deprecated:: 1.1.0

            observed : bool, default False
                This only applies if any of the gp are Categoricals.
                If True: only show observed values for categorical gp.
                If False: show all values for categorical gp.

                .. versionadded:: 0.23.0
            dropna : bool, default True
                If True, and if group keys contain NA values, NA values together
                with row/column will be dropped.
                If False, NA values will also be treated as the key in groups

                .. versionadded:: 1.1.0

            Returns
            -------
            SeriesGroupBy
                Returns a groupby object that contains information about the groups.

            See Also
            --------
            resample : Convenience method for frequency conversion and resampling
                of time series.

            Notes
            -----
            See the `user guide
            <https://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.

            Examples
            --------
            >>> ser = pd.Series([390., 350., 30., 20.],
            ...                 index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed")
            >>> ser
            Falcon    390.0
            Falcon    350.0
            Parrot     30.0
            Parrot     20.0
            Name: Max Speed, dtype: float64
            >>> ser.groupby(["a", "b", "a", "b"]).mean()
            a    210.0
            b    185.0
            Name: Max Speed, dtype: float64
            >>> ser.groupby(level=0).mean()
            Falcon    370.0
            Parrot     25.0
            Name: Max Speed, dtype: float64
            >>> ser.groupby(ser > 100).mean()
            Max Speed
            False     25.0
            True     370.0
            Name: Max Speed, dtype: float64

            **Grouping by Indexes**

            We can groupby different levels of a hierarchical index
            using the `level` parameter:

            >>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
            ...           ['Captive', 'Wild', 'Captive', 'Wild']]
            >>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
            >>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed")
            >>> ser
            Animal  Type
            Falcon  Captive    390.0
                    Wild       350.0
            Parrot  Captive     30.0
                    Wild        20.0
            Name: Max Speed, dtype: float64
            >>> ser.groupby(level=0).mean()
            Animal
            Falcon    370.0
            Parrot     25.0
            Name: Max Speed, dtype: float64
            >>> ser.groupby(level="Type").mean()
            Type
            Captive    210.0
            Wild       185.0
            Name: Max Speed, dtype: float64

            We can also choose to include `NA` in group keys or not by defining
            `dropna` parameter, the default setting is `True`:

            >>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan])
            >>> ser.groupby(level=0).sum()
            a    3
            b    3
            dtype: int64

            >>> ser.groupby(level=0, dropna=False).sum()
            a    3
            b    3
            NaN  3
            dtype: int64

            >>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot']
            >>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed")
            >>> ser.groupby(["a", "b", "a", np.nan]).mean()
            a    210.0
            b    350.0
            Name: Max Speed, dtype: float64

            >>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean()
            a    210.0
            b    350.0
            NaN   20.0
            Name: Max Speed, dtype: float64
        """

        # import faker
        import random

        # fk = faker.Faker('zh_CN')
        # Demo dataset: 10 students, two classes ('101', '102') x five seats.
        # Columns: sid (str), age (int), home (categorical with one category,
        # 'close', that never occurs -> useful for the observed= demo), four
        # random score columns and a derived 'total'.
        df = pd.DataFrame(
            {'sid': ['{:04d}'.format(j+1) for j in range(10)],
             'age': np.random.randint(18, 20, 10),
             'home': pd.Categorical([random.choice(['far', 'near', 'local']) for j in range(10)],
                                    categories=(['far', 'near', 'close', 'local'])
                                    ),
             # 'class': np.random.randint(1, 4, 10),
             'math1': np.random.randint(50, 95, (10,)),
             'math2': np.random.randint(50, 95, (10,)),
             'science': np.random.randint(30, 100, (10,)),
             'art': np.random.randint(50, 95, (10,)),
             },
            index=pd.MultiIndex.from_product([['101', '102'], list(range(5))])
            )
        df['total'] = df.math1 + df.math2 + df.science + df.art
        df.index.names = ['class', 'seat']

        # Small deterministic frame for the column-wise (axis=1) grouping demos.
        df2 = pd.DataFrame([[0, 3, 0, 1], [2, 3, 5, 1]], columns=list('ABCD'))

        # Group by the categorical 'home' column, restricting aggregation to
        # three numeric columns.
        gp = df.groupby(by='home')[['math1', 'math2', 'total']]

        # Grouping function: receives each index label (a (class, seat) tuple)
        # and buckets rows by seat parity.
        myfun = lambda x: x[1] % 2

        # df.info() writes its report straight to stdout and returns None, so
        # call it on its own instead of embedding it in the message below
        # (embedding used to print a stray 'None' line).
        df.info()

        # NOTE(review): several demos below rely on older pandas behavior
        # (groupby(axis=1) and mean() over non-numeric columns) — confirm the
        # pandas version this tutorial targets.
        print(
            "# 示例数据集：\n"
            "#   sid为字符类型，home为枚举类型，"
            "    age,math1,math2,science,total为数字值类型\n"
            "#   索引: (class,seat)\n"
            " >>> df\n"
            f"{df}\n"

            f"{'-' * 80}\n"
            # Fixed narration: grouping is by the 'home' column, not by index
            # level 'class' as the text previously claimed.
            "# 按照home列产生数据分组， 聚合计算针对math1、math2、total等三个数据列\n"
            " >>> gp = df.groupby(by='home')[['math1', 'math2', 'total']]\n"
            " >>> gp\n"
            f"{gp}\n"

            "# 查看分组数据中的分组数量、分组键\n"
            " >>> '分组数量：{}, 分组名称：{}'.format(gp.ngroups, gp.groups.keys())\n"
            f"{'分组数量：{},'.format(gp.ngroups)}\n"
            f"{'分组名称：{}'.format(gp.groups.keys())}\n"

            "# 遍历分组数据（方式一）\n"
            " >>> for name, data in gp:\n"
            "         print(name)\n"
            "         print(data)\n"
            f"{[(name, data) for name, data in gp]}\n"

            "# 遍历分组数据（方式二）\n"
            " >>> for name in gp.groups:\n"
            "         print(name)\n"
            "         print(gp.get_group(name))\n"
            f"{[(name, data) for name, data in gp]}\n"

            "# 针对分组数据，调用聚合方法\n"
            # Shown command now matches the evaluated expression (was '>>> gp.max()').
            " >>> gp.mean()\n"
            f"{gp.mean()}\n"
            " >>> gp.agg([max, min])\n"
            f"{gp.agg([max, min])}\n"

            f"{'-' * 80}\n"
            "# 按照枚举类型分组\n"
            " >>> df.groupby(by='home').agg({'science': max, 'total': np.mean})\n"
            f"{df.groupby(by='home').agg({'science': max, 'total': np.mean})}\n"

            f"{'-' * 80}\n"
            "# 使用apply调用分组数据集, apply的输入数据集为每个分组数据集\n"
            "# --- 设置group_keys=True可以为每个分组数据集配置分组键\n"
            # Shown commands now match the evaluated expressions (head(1) vs
            # head(2)) and include the previously missing closing parenthesis.
            " >>> df.groupby(by='home', group_keys=False).apply(lambda x: x.head(1))\n"
            f"{df.groupby(by='home', group_keys=False).apply(lambda x: x.head(1))}\n"
            " >>> df.groupby(by='home', group_keys=True).apply(lambda x: x.head(2))\n"
            f"{df.groupby(by='home', group_keys=True).apply(lambda x: x.head(2))}\n"

            f"{'-' * 80}\n"
            "# 设置observed=True，可以过滤没有数据的枚举类型分组标签值\n"
            " >>> df.groupby(by='home').max()\n"
            f"{df.groupby(by='home').max()}\n"
            " >>> df.groupby(by='home', observed=True).max()\n"
            f"{df.groupby(by='home', observed=True).max()}\n"

            f"{'-'*80}\n"
            "# 在列向进行分组，需要注意数据类型的一致性（非数字数值不能进行聚合计算）\n"
            "# --- 根据0行(loc[0,:])对列进行分组，具有相同值的列分为一个组\n"
            "# --- 0行中有三个不同值：0，1，3，相应的列分为三组： 0:{A, C}，1:{D}，3:{B}\n"
            "# --- 列分组后形成新的列：0，1，3，结果数据中，每行的值为对分组中列值求max()\n"
            " >>> df2\n"
            f"{df2}\n"
            " >>> df2.groupby(by=df2.loc[0, :], axis=1).max()\n"
            f"{df2.groupby(by=df2.loc[0, :], axis=1).max()}\n"
            # " >>> df[['math1', 'math2', 'science', 'art']].groupby(by=df.loc[('class-1', 1)], axis=1).max()\n"
            # f"{df[['math1', 'math2', 'science', 'art']].groupby(by=df.loc[('class-1', 1)], axis=1).max()}\n"

            "# 在列向进行分组，可以使用字典设置分组名称\n"
            " >>> df2.groupby(by={'A': 0, 'B': 0, 'C': 1, 'D': 1}, axis=1).max()\n"
            f"{df2.groupby(by={'A': 0, 'B': 0, 'C': 1, 'D': 1}, axis=1).max()}\n"
            # " >>> df[['math1', 'math2']].groupby(by={'math1': 'math', 'math2': 'math'}, axis=1).sum()\n"
            # f"{df[['math1', 'math2']].groupby(by={'math1': 'math', 'math2': 'math'}, axis=1).sum()}\n"

            f"{'-' * 80}\n"
            "# 使用数组进行直接分组, 数组长度等于数据集长度, 根据数组中的值分组\n"
            "# 在df2中，使用数组在列向分组，将ABD分为0组，C分为1组\n"
            " >>> df2.groupby(by=[0, 0, 1, 0], axis=1).max()\n"
            # Evaluated list now matches the shown command and the narration
            # above (A, B, D -> group 0; C -> group 1); it used to be
            # [0, 0, 0, 1], contradicting both.
            f"{df2.groupby(by=[0, 0, 1, 0], axis=1).max()}\n"
            # " >>> df.groupby(by=[0, 0, 0, 1, 1, 2, 2, 3, 3, 0]).max()\n"
            # f"{df.groupby(by=[0, 0, 0, 1, 1, 2, 2, 3, 3, 0]).max()}\n\n"

            "# 混合使用数组、函数、数据列、索引的组合进行分组，分组组合为一个多层索引\n"
            "# --- 注意，设置函数时，其输入参数为数据集的索引\n"
            "# --- 使用数组、索引层class、函数、列age形成分组，数组和函数没有层次名称\n"
            " >>> myfun = lambda x: x[1] % 2\n"
            " >>> df.groupby(by=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 0], 'class', myfun, 'age']).mean()\n"
            f"{df.groupby(by=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 0], 'class', myfun, 'age']).mean()}\n"
            )

        # Return both demo frames for further inspection by the caller.
        return df, df2

    def st_gp_cut(self):
        """
        cut(x, bins, right: bool = True, labels=None, retbins: bool = False, precision: int = 3,
            include_lowest: bool = False, duplicates: str = 'raise', ordered: bool = True)

            Bin values into discrete intervals.

            Use `cut` when you need to segment and sort data values into bins. This
            function is also useful for going from a continuous variable to a
            categorical variable. For example, `cut` could convert ages to groups of
            age ranges. Supports binning into an equal number of bins, or a
            pre-specified array of bins.

            Parameters
            ----------
            x : array-like
                The input array to be binned. Must be 1-dimensional.
            bins : int, sequence of scalars, or IntervalIndex
                The criteria to bin by.
                * int : Defines the number of equal-width bins in the range of `x`. The
                  range of `x` is extended by .1% on each side to include the minimum
                  and maximum values of `x`.
                * sequence of scalars : Defines the bin edges allowing for non-uniform
                  width. No extension of the range of `x` is done.
                * IntervalIndex : Defines the exact bins to be used. Note that
                  IntervalIndex for `bins` must be non-overlapping.
            right : bool, default True
                Indicates whether `bins` includes the rightmost edge or not. If
                ``right == True`` (the default), then the `bins` ``[1, 2, 3, 4]``
                indicate (1,2], (2,3], (3,4]. This argument is ignored when
                `bins` is an IntervalIndex.
            labels : array or False, default None
                Specifies the labels for the returned bins. Must be the same length as
                the resulting bins. If False, returns only integer indicators of the
                bins. This affects the type of the output container (see below).
                This argument is ignored when `bins` is an IntervalIndex. If True,
                raises an error. When `ordered=False`, labels must be provided.
            retbins : bool, default False
                Whether to return the bins or not. Useful when bins is provided
                as a scalar.
            precision : int, default 3
                The precision at which to store and display the bins labels.
            include_lowest : bool, default False
                Whether the first interval should be left-inclusive or not.
            duplicates : {default 'raise', 'drop'}, optional
                If bin edges are not unique, raise ValueError or drop non-uniques.
            ordered : bool, default True
                Whether the labels are ordered or not. Applies to returned types
                Categorical and Series (with Categorical dtype). If True,
                the resulting categorical will be ordered. If False, the resulting
                categorical will be unordered (labels must be provided).

                .. versionadded:: 1.1.0

            Returns
            -------
            out : Categorical, Series, or ndarray
                An array-like object representing the respective bin for each value
                of `x`. The type depends on the value of `labels`.

                * True (default) : returns a Series for Series `x` or a
                  Categorical for all other inputs. The values stored within
                  are Interval dtype.

                * sequence of scalars : returns a Series for Series `x` or a
                  Categorical for all other inputs. The values stored within
                  are whatever the type in the sequence is.

                * False : returns an ndarray of integers.

            bins : numpy.ndarray or IntervalIndex.
                The computed or specified bins. Only returned when `retbins=True`.
                For scalar or sequence `bins`, this is an ndarray with the computed
                bins. If set `duplicates=drop`, `bins` will drop non-unique bin. For
                an IntervalIndex `bins`, this is equal to `bins`.

            See Also
            --------
            qcut : Discretize variable into equal-sized buckets based on rank
                or based on sample quantiles.
            Categorical : Array type for storing data that come from a
                fixed set of values.
            Series : One-dimensional array with axis labels (including time series).
            IntervalIndex : Immutable Index implementing an ordered, sliceable set.

            Notes
            -----
            Any NA values will be NA in the result. Out of bounds values will be NA in
            the resulting Series or Categorical object.
            Examples
            --------
            Discretize into three equal-sized bins.

            >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3)
            ... # doctest: +ELLIPSIS
            [(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
            Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...

            >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3, retbins=True)
            ... # doctest: +ELLIPSIS
            ([(0.994, 3.0], (5.0, 7.0], (3.0, 5.0], (3.0, 5.0], (5.0, 7.0], ...
            Categories (3, interval[float64]): [(0.994, 3.0] < (3.0, 5.0] ...
            array([0.994, 3.   , 5.   , 7.   ]))

            Discovers the same bins, but assign them specific labels. Notice that
            the returned Categorical's categories are `labels` and is ordered.
            >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]),
            ...        3, labels=["bad", "medium", "good"])
            ['bad', 'good', 'medium', 'medium', 'good', 'bad']
            Categories (3, object): ['bad' < 'medium' < 'good']

            ``ordered=False`` will result in unordered categories when labels are passed.
            This parameter can be used to allow non-unique labels:

            >>> pd.cut(np.array([1, 7, 5, 4, 6, 3]), 3,
            ...        labels=["B", "A", "B"], ordered=False)
            ['B', 'B', 'A', 'A', 'B', 'B']
            Categories (2, object): ['A', 'B']

            ``labels=False`` implies you just want the bins back.

            >>> pd.cut([0, 1, 1, 2], bins=4, labels=False)
            array([0, 1, 1, 3])
            Passing a Series as an input returns a Series with categorical dtype:

            >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
            ...               index=['a', 'b', 'c', 'd', 'e'])
            >>> pd.cut(s, 3)
            ... # doctest: +ELLIPSIS
            a    (1.992, 4.667]
            b    (1.992, 4.667]
            c    (4.667, 7.333]
            d     (7.333, 10.0]
            e     (7.333, 10.0]
            dtype: category
            Categories (3, interval[float64]): [(1.992, 4.667] < (4.667, ...

            Passing a Series as an input returns a Series with mapping value.
            It is used to map numerically to intervals based on bins.

            >>> s = pd.Series(np.array([2, 4, 6, 8, 10]),
            ...               index=['a', 'b', 'c', 'd', 'e'])
            >>> pd.cut(s, [0, 2, 4, 6, 8, 10], labels=False, retbins=True, right=False)
            ... # doctest: +ELLIPSIS
            (a    1.0
             b    2.0
             c    3.0
             d    4.0
             e    NaN
             dtype: float64,
             array([ 0,  2,  4,  6,  8, 10]))

            Use `drop` optional when bins is not unique

            >>> pd.cut(s, [0, 2, 4, 6, 10, 10], labels=False, retbins=True,
            ...        right=False, duplicates='drop')
            ... # doctest: +ELLIPSIS
            (a    1.0
             b    2.0
             c    3.0
             d    3.0
             e    NaN
             dtype: float64,
             array([ 0,  2,  4,  6, 10]))

            Passing an IntervalIndex for `bins` results in those categories exactly.
            Notice that values not covered by the IntervalIndex are set to NaN. 0
            is to the left of the first bin (which is closed on the right), and 1.5
            falls between two bins.

            >>> bins = pd.IntervalIndex.from_tuples([(0, 1), (2, 3), (4, 5)])
            >>> pd.cut([0, 0.5, 1.5, 2.5, 4.5], bins)
            [NaN, (0.0, 1.0], NaN, (2.0, 3.0], (4.0, 5.0]]
            Categories (3, interval[int64]): [(0, 1] < (2, 3] < (4, 5]]
        """
        import random

        sr = pd.Series([1, 5, 3, 7, 1, 9], index=range(1, 7))
        # idx_interval = pd.IntervalIndex(
        #     data=[
        #         pd.Interval(0, 0.99, closed='both'),
        #         pd.Interval(1, 2.99, closed='both'),
        #         pd.Interval(3, 9, closed='both'),
        #         ])

        print(
            f" >>> sr\n"
            f"{sr}\n"
            
            f"{'-'*80}\n"
            f"# 使用缺省设置，划分3个区间，区间左开右闭，精度为3位\n"
            f" >>> pd.cut(sr, 3)\n"
            f"{pd.cut(sr, 3)}\n"
            
            f"{'-'*80}\n"
            f"# 设置返回分组bins， 精度2位\n"
            f" >>> pd.cut(sr, 3, retbins=True, precision=2)\n"
            f"{pd.cut(sr, 3, retbins=True, precision=2)}\n"

            f"{'-'*80}\n"
            f"# 设置不包括左边端点值，区间的左端点取值为开，数据1被置为NaN\n"
            f" >>> pd.cut(sr, [1, 3, 8, 9], include_lowest=False)\n"
            f"{pd.cut(sr, [1, 3, 8, 9], include_lowest=False)}\n"
            f"# 设置包括左边端点值，区间的左端点取值被降低\n"
            f" >>> pd.cut(sr, [1, 3, 8, 9], include_lowest=True)\n"
            f"{pd.cut(sr, [1, 3, 8, 9], include_lowest=True)}\n"
            
            f"{'-'*80}\n"
            f"# 设置duplicates=‘drop’，去除重复的端点\n"
            f" >>> pd.cut(x=sr, bins=[0, 1, 5, 5, 9], duplicates='drop')\n"
            f"{pd.cut(x=sr, bins=[0, 1, 5, 5, 9], duplicates='drop')}\n"

            f"{'-'*80}\n"
            f"# 设置bins为区间索引进行分组\n"
            f" >>> pd.cut(x=sr, bins=pd.IntervalIndex.from_tuples(((1, 3), (3, 8), (8, 9))))\n"
            f"{pd.cut(x=sr, bins=pd.IntervalIndex.from_tuples(((1, 3), (3, 8), (8, 9))))}\n"
            
            f"{'-'*80}\n"
            f" >>> pd.cut(x=sr, bins=[0, 1, 5, 5, 9], duplicates='raise')\n"
            # f"{pd.cut(x=sr, bins=[0, 1, 5, 5, 9], duplicates='raise')}\n"
            "ValueError: Bin edges must be unique: array([0, 1, 5, 5, 9]).\n"
            "You can drop duplicate edges by setting the 'duplicates' kwarg\n"
            
            f"{'-'*80}\n"
            f"# 设置labels作为分组标签序列，使用ordered排序分组\n"
            f" >>> pd.cut(x=[5, 3, 8, 1, 0], bins=3, labels=['small', 'mid', 'big'], ordered=True)\n"
            f"{pd.cut(x=[5, 3, 8, 1, 0], bins=3, labels=['small', 'mid', 'big'], ordered=True)}\n"
            f" >>> pd.cut(x=[5, 3, 8, 1, 0], bins=3, labels=['small', 'mid', 'big'], ordered=False)\n"
            f"{pd.cut(x=[5, 3, 8, 1, 0], bins=3, labels=['small', 'mid', 'big'], ordered=False)}\n"
            )
        return sr

    def st_gp_qcut(self):
        """
        Demo of pd.qcut (quantile-based discretization): print several qcut
        variants on a small frame and return None.

        Pasted pandas reference:

        qcut(x, q, labels=None, retbins: bool = False, precision: int = 3, duplicates: str = 'raise')
            Quantile-based discretization function.

            Discretize variable into equal-sized buckets based on rank or based
            on sample quantiles. For example 1000 values for 10 quantiles would
            produce a Categorical object indicating quantile membership for each data point.

            Parameters
            ----------
            x : 1d ndarray or Series
            q : int or list-like of float
                Number of quantiles. 10 for deciles, 4 for quartiles, etc. Alternately
                array of quantiles, e.g. [0, .25, .5, .75, 1.] for quartiles.
            labels : array or False, default None
                Used as labels for the resulting bins. Must be of the same length as
                the resulting bins. If False, return only integer indicators of the
                bins. If True, raises an error.
            retbins : bool, optional
                Whether to return the (bins, labels) or not. Can be useful if bins
                is given as a scalar.
            precision : int, optional
                The precision at which to store and display the bins labels.
            duplicates : {default 'raise', 'drop'}, optional
                If bin edges are not unique, raise ValueError or drop non-uniques.

            Returns
            -------
            out : Categorical or Series or array of integers if labels is False
                The return type (Categorical or Series) depends on the input: a Series
                of type category if input is a Series else Categorical. Bins are
                represented as categories when categorical data is returned.
            bins : ndarray of floats
                Returned only if `retbins` is True.

            Notes
            -----
            Out of bounds values will be NA in the resulting Categorical object

            Examples
            --------
            >>> pd.qcut(range(5), 4)
            ... # doctest: +ELLIPSIS
            [(-0.001, 1.0], (-0.001, 1.0], (1.0, 2.0], (2.0, 3.0], (3.0, 4.0]]
            Categories (4, interval[float64]): [(-0.001, 1.0] < (1.0, 2.0] ...

            >>> pd.qcut(range(5), 3, labels=["good", "medium", "bad"])
            ... # doctest: +SKIP
            [good, good, medium, bad, bad]
            Categories (3, object): [good < medium < bad]

            >>> pd.qcut(range(5), 4, labels=False)
            array([0, 0, 1, 2, 3])
        """

        # Column 'b' holds a NaN on purpose so the demo can show how qcut
        # treats missing values.
        df = pd.DataFrame({'a': [1, 3, 5, 8, 9],
                           'b': [3, None, 9, 3, 1]
                           })
        # Work on a copy so the first ">>> df" echo below still shows the
        # unmodified frame.
        df1 = df.copy()
        df1.loc[:, 'a-level'] = pd.qcut(x=df.a, q=3)

        print(
            f" >>> df\n"
            f"{df}\n"

            f"# 使用单调增长的比例序列设置进行分组计算\n"
            f" >>> pd.qcut(x=df.a, q=[0, 0.1, 0.3, 1.0])\n"
            f"{pd.qcut(x=df.a, q=[0, 0.1, 0.3, 1.0])}\n"

            f"# 将数据列b分为三个分组，使用duplicate=‘drop’可以忽略NaN值数据\n"
            # BUG FIX: the echoed command previously omitted retbins=True even
            # though the evaluated expression below uses it; the echo now
            # matches what is actually executed.
            f" >>> pd.qcut(x=df.b, q=3, duplicates='drop', retbins=True)\n"
            f"{pd.qcut(x=df.b, q=3, duplicates='drop', retbins=True)}\n"

            f"# 输入数据为列表，输出结果为枚举类型序列\n"
            f" >>> pd.qcut(x=list(df.a), q=3)\n"
            f"{pd.qcut(x=list(df.a), q=3)}\n"

            f"# 增加数据列a的分组标签列a-level\n"
            f" >>> df['a-level'] = pd.qcut(x=df.a, q=3)\n"
            f" >>> df\n"
            f"{df1}\n"
        )
        return

    def st_pt_pivot(self):
        """
        pivot(self, index=None, columns=None, values=None) -> 'DataFrame'
            Return reshaped DataFrame organized by given index / column values.

            Reshape data (produce a "pivot" table) based on column values. Uses
            unique values from specified `index` / `columns` to form axes of the
            resulting DataFrame. This function does not support data
            aggregation, multiple values will result in a MultiIndex in the
            columns. See the :obj:`User Guide <reshaping>` for more on reshaping.

            Parameters
            ----------
            index : str or object or a list of str, optional
                Column to use to make new frame's index. If None, uses
                existing index.

                .. versionchanged:: 1.1.0
                   Also accept list of index names.

            columns : str or object or a list of str
                Column to use to make new frame's columns.

                .. versionchanged:: 1.1.0
                   Also accept list of columns names.
            values : str, object or a list of the previous, optional
                Column(s) to use for populating new frame's values. If not
                specified, all remaining columns will be used and the result will
                have hierarchically indexed columns.

                .. versionchanged:: 0.23.0
                   Also accept list of column names.

            Returns
            -------
            DataFrame
                Returns reshaped DataFrame.

            Raises
            ------
            ValueError:
                When there are any `index`, `columns` combinations with multiple
                values. `DataFrame.pivot_table` when you need to aggregate.

            See Also
            --------
            DataFrame.pivot_table : Generalization of pivot that can handle
                duplicate values for one index/column pair.
            DataFrame.unstack : Pivot based on the index values instead of a
                column.
            Notes
            -----
            For finer-tuned control, see hierarchical indexing documentation along
            with the related stack/unstack methods.

            Examples
            --------
            >>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
            ...                            'two'],
            ...                    'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
            ...                    'baz': [1, 2, 3, 4, 5, 6],
            ...                    'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
            >>> df
                foo   bar  baz  zoo
            0   one   A    1    x
            1   one   B    2    y
            2   one   C    3    z
            3   two   A    4    q
            4   two   B    5    w
            5   two   C    6    t

            >>> df.pivot(index='foo', columns='bar', values='baz')
            bar  A   B   C
            foo
            one  1   2   3
            two  4   5   6

            >>> df.pivot(index='foo', columns='bar')['baz']
            bar  A   B   C
            foo
            one  1   2   3
            two  4   5   6

            >>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
                  baz       zoo
            bar   A  B  C   A  B  C
            foo
            one   1  2  3   x  y  z
            two   4  5  6   q  w  t

            You could also assign a list of column names or a list of index names.

            >>> df = pd.DataFrame({
            ...        "lev1": [1, 1, 1, 2, 2, 2],
            ...        "lev2": [1, 1, 2, 1, 1, 2],
            ...        "lev3": [1, 2, 1, 2, 1, 2],
            ...        "lev4": [1, 2, 3, 4, 5, 6],
            ...        "values": [0, 1, 2, 3, 4, 5]})
            >>> df
                lev1 lev2 lev3 lev4 values
            0   1    1    1    1    0
            1   1    1    2    2    1
            2   1    2    1    3    2
            3   2    1    2    4    3
            4   2    1    1    5    4
            5   2    2    2    6    5

            >>> df.pivot(index="lev1", columns=["lev2", "lev3"],values="values")
            lev2    1         2
            lev3    1    2    1    2
            lev1
            1     0.0  1.0  2.0  NaN
            2     4.0  3.0  NaN  5.0

            >>> df.pivot(index=["lev1", "lev2"], columns=["lev3"],values="values")
                  lev3    1    2
            lev1  lev2
               1     1  0.0  1.0
                     2  2.0  NaN
               2     1  4.0  3.0
                     2  NaN  5.0

            A ValueError is raised if there are any duplicates.

            >>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
            ...                    "bar": ['A', 'A', 'B', 'C'],
            ...                    "baz": [1, 2, 3, 4]})
            >>> df
               foo bar  baz
            0  one   A    1
            1  one   A    2
            2  two   B    3
            3  two   C    4

            Notice that the first two rows are the same for our `index`
            and `columns` arguments.

            >>> df.pivot(index='foo', columns='bar', values='baz')
            Traceback (most recent call last):
               ...
            ValueError: Index contains duplicate entries, cannot reshape
        """

        day_offset = pd.offsets.DateOffset(days=1)

        df = pd.DataFrame(
            {'name': ['Li', 'He', 'Wu', 'Su', 'Li'],
             'age': [23, 32, 22, 35, 23],
             'fee': [3000, 5200, 3200, 5500, 2900],
             'date': [pd.Timestamp(2010, 1, 1)+day_offset*np.random.randint(1, 5) for _ in range(5)]
             }
        )

        print(
            f" >>> df\n"
            f"{df}\n"

            f"# 分别指定索引、列、数据进行数据透视重组\n"
            f" >>> df.pivot(index='name', columns='fee', values='age')\n"
            f"{df.pivot(index='name', columns='fee', values='age')}\n"

            f"# 有重复数据，触发异常\n"
            f" >>> df.pivot(index='name', columns='age')\n"
            # f"{df.pivot(index='name', columns='age')}\n"
            f"ValueError: Index contains duplicate entries, cannot reshape\n"

            f"# 设置多个数据值，产生两层列索引\n"
            f" >>> df.pivot(index='name', columns='date', values=['age', 'fee'])\n"
            f"{df.pivot(index='name', columns='date', values=['age', 'fee'])}\n"
        )

        return df

    def st_pt_pivot_table(self):
        """
        Demo of DataFrame.pivot_table: build a small fee table with random
        dates and print several pivot_table variants (multiple aggfuncs,
        margins, automatic dropping of non-aggregatable columns).  The pasted
        pandas help text follows verbatim.

        pivot_table(self, values=None, index=None, columns=None, aggfunc='mean', fill_value=None, margins=False,
                    dropna=True, margins_name='All', observed=False)
                    -> 'DataFrame'

            Create a spreadsheet-style pivot table as a DataFrame.

            The levels in the pivot table will be stored in MultiIndex objects
            (hierarchical indexes) on the index and columns of the result DataFrame.

            Parameters
            ----------
            values : column to aggregate, optional
            index : column, Grouper, array, or list of the previous
                If an array is passed, it must be the same length as the data. The
                list can contain any of the other types (except list).
                Keys to group by on the pivot table index.  If an array is passed,
                it is being used as the same manner as column values.
            columns : column, Grouper, array, or list of the previous
                If an array is passed, it must be the same length as the data. The
                list can contain any of the other types (except list).
                Keys to group by on the pivot table column.  If an array is passed,
                it is being used as the same manner as column values.
            aggfunc : function, list of functions, dict, default numpy.mean
                If list of functions passed, the resulting pivot table will have
                hierarchical columns whose top level are the function names
                (inferred from the function objects themselves)
                If dict is passed, the key is column to aggregate and value
                is function or list of functions.
            fill_value : scalar, default None
                Value to replace missing values with (in the resulting pivot table,
                after aggregation).
            margins : bool, default False
                Add all row / columns (e.g. for subtotal / grand totals).
            dropna : bool, default True
                Do not include columns whose entries are all NaN.
            margins_name : str, default 'All'
                Name of the row / column that will contain the totals
                when margins is True.
            observed : bool, default False
                This only applies if any of the gp are Categoricals.
                If True: only show observed values for categorical gp.
                If False: show all values for categorical gp.

                .. versionchanged:: 0.25.0

            Returns
            -------
            DataFrame
                An Excel style pivot table.

            See Also
            --------
            DataFrame.pivot : Pivot without aggregation that can handle
                non-numeric data.
            DataFrame.melt: Unpivot a DataFrame from wide to long format,
                optionally leaving identifiers set.
            wide_to_long : Wide panel to long format. Less flexible but more
                user-friendly than melt.

            Examples
            --------
            >>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
            ...                          "bar", "bar", "bar", "bar"],
            ...                    "B": ["one", "one", "one", "two", "two",
            ...                          "one", "one", "two", "two"],
            ...                    "C": ["small", "large", "large", "small",
            ...                          "small", "large", "small", "small",
            ...                          "large"],
            ...                    "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
            ...                    "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
            >>> df
                 A    B      C  D  E
            0  foo  one  small  1  2
            1  foo  one  large  2  4
            2  foo  one  large  2  5
            3  foo  two  small  3  5
            4  foo  two  small  3  6
            5  bar  one  large  4  6
            6  bar  one  small  5  8
            7  bar  two  small  6  9
            8  bar  two  large  7  9

            This first example aggregates values by taking the sum.

            >>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
            ...                     columns=['C'], aggfunc=np.sum)
            >>> table
            C        large  small
            A   B
            bar one    4.0    5.0
                two    7.0    6.0
            foo one    4.0    1.0
                two    NaN    6.0

            We can also fill missing values using the `fill_value` parameter.

            >>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
            ...                     columns=['C'], aggfunc=np.sum, fill_value=0)
            >>> table
            C        large  small
            A   B
            bar one      4      5
                two      7      6
            foo one      4      1
                two      0      6

            The next example aggregates by taking the mean across multiple columns.

            >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
            ...                     aggfunc={'D': np.mean,
            ...                              'E': np.mean})
            >>> table
                            D         E
            A   C
            bar large  5.500000  7.500000
                small  5.500000  8.500000
            foo large  2.000000  4.500000
                small  2.333333  4.333333

            We can also calculate multiple types of aggregations for any given
            value column.

            >>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
            ...                     aggfunc={'D': np.mean,
            ...                              'E': [min, max, np.mean]})
            >>> table
                            D    E
                        mean  max      mean  min
            A   C
            bar large  5.500000  9.0  7.500000  6.0
                small  5.500000  9.0  8.500000  8.0
            foo large  2.000000  5.0  4.500000  4.0
                small  2.333333  6.0  4.333333  2.0
        """

        # Grouper: pasted help text for pd.Grouper kept below as a bare
        # reference string (never executed at runtime beyond evaluation of
        # the literal itself).
        """
        class Grouper(builtins.object)
         |  Grouper(*args, **kwargs)
         |
         |  A Grouper allows the user to specify a groupby instruction for an object.
         |
         |  This specification will select a column via the key parameter, or if the
         |  level and/or axis parameters are given, a level of the index of the target
         |  object.
         |
         |  If `axis` and/or `level` are passed as keywords to both `Grouper` and
         |  `groupby`, the values passed to `Grouper` take precedence.
         |
         |  Parameters
         |  ----------
         |  key : str, defaults to None
         |      Groupby key, which selects the grouping column of the target.
         |  level : name/number, defaults to None
         |      The level for the target index.
         |  freq : str / frequency object, defaults to None
         |      This will groupby the specified frequency if the target selection
         |      (via key or level) is a datetime-like object. For full specification
         |      of available frequencies, please see `here
         |      <https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`_.
         |  axis : str, int, defaults to 0
         |      Number/name of the axis.
         |  sort : bool, default to False
         |      Whether to sort the resulting labels.
         |  closed : {'left' or 'right'}
         |      Closed end of interval. Only when `freq` parameter is passed.
         |  label : {'left' or 'right'}
         |      Interval boundary to use for labeling.
         |      Only when `freq` parameter is passed.
         |  convention : {'start', 'end', 'e', 's'}
         |      If grouper is PeriodIndex and `freq` parameter is passed.
         |  base : int, default 0
         |      Only when `freq` parameter is passed.
         |      For frequencies that evenly subdivide 1 day, the "origin" of the
         |      aggregated intervals. For example, for '5min' frequency, base could
         |      range from 0 through 4. Defaults to 0.
         |
         |      .. deprecated:: 1.1.0
         |          The new arguments that you should use are 'offset' or 'origin'.
         |
         |  loffset : str, DateOffset, timedelta object
         |      Only when `freq` parameter is passed.
         |
         |      .. deprecated:: 1.1.0
         |          loffset is only working for ``.resample(...)`` and not for
         |          Grouper (:issue:`28302`).
         |          However, loffset is also deprecated for ``.resample(...)``
         |          See: :class:`DataFrame.resample`
         |
         |  origin : {'epoch', 'start', 'start_day'}, Timestamp or str, default 'start_day'
         |      The timestamp on which to adjust the grouping. The timezone of origin must
         |      match the timezone of the index.
         |      If a timestamp is not used, these values are also supported:
         |
         |      - 'epoch': `origin` is 1970-01-01
         |      - 'start': `origin` is the first value of the timeseries
         |      - 'start_day': `origin` is the first day at midnight of the timeseries
         |
         |      .. versionadded:: 1.1.0
         |
         |  offset : Timedelta or str, default is None
         |      An offset timedelta added to the origin.
         |
         |      .. versionadded:: 1.1.0
         |
         |  dropna : bool, default True
         |      If True, and if group keys contain NA values, NA values together with
         |      row/column will be dropped. If False, NA values will also be treated as
         |      the key in groups.
         |
         |      .. versionadded:: 1.2.0
         |
         |  Returns
         |  -------
         |  A specification for a groupby instruction
         |
         |  Examples
         |  --------
         |  Syntactic sugar for ``df.groupby('A')``
         |
         |  >>> df = pd.DataFrame(
         |  ...     {
         |  ...         "Animal": ["Falcon", "Parrot", "Falcon", "Falcon", "Parrot"],
         |  ...         "Speed": [100, 5, 200, 300, 15],
         |  ...     }
         |  ... )
         |  >>> df
         |     Animal  Speed
         |  0  Falcon    100
         |  1  Parrot      5
         |  2  Falcon    200
         |  3  Falcon    300
         |  4  Parrot     15
         |  >>> df.groupby(pd.Grouper(key="Animal")).mean()
         |          Speed
         |  Animal
         |  Falcon    200
         |  Parrot     10
         |
         |  Specify a resample operation on the column 'Publish date'
         |
         |  >>> df = pd.DataFrame(
         |  ...    {
         |  ...        "Publish date": [
         |  ...             pd.Timestamp("2000-01-02"),
         |  ...             pd.Timestamp("2000-01-02"),
         |  ...             pd.Timestamp("2000-01-09"),
         |  ...             pd.Timestamp("2000-01-16")
         |  ...         ],
         |  ...         "ID": [0, 1, 2, 3],
         |  ...         "Price": [10, 20, 30, 40]
         |  ...     }
         |  ... )
         |  >>> df
         |    Publish date  ID  Price
         |  0   2000-01-02   0     10
         |  1   2000-01-02   1     20
         |  2   2000-01-09   2     30
         |  3   2000-01-16   3     40
         |  >>> df.groupby(pd.Grouper(key="Publish date", freq="1W")).mean()
         |                 ID  Price
         |  Publish date
         |  2000-01-02    0.5   15.0
         |  2000-01-09    2.0   30.0
         |  2000-01-16    3.0   40.0
         |
         |  If you want to adjust the start of the bins based on a fixed timestamp:
         |
         |  >>> start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00'
         |  >>> rng = pd.date_range(start, end, freq='7min')
         |  >>> ts = pd.Series(np.arange(len(rng)) * 3, index=rng)
         |  >>> ts
         |  2000-10-01 23:30:00     0
         |  2000-10-01 23:37:00     3
         |  2000-10-01 23:44:00     6
         |  2000-10-01 23:51:00     9
         |  2000-10-01 23:58:00    12
         |  2000-10-02 00:05:00    15
         |  2000-10-02 00:12:00    18
         |  2000-10-02 00:19:00    21
         |  2000-10-02 00:26:00    24
         |  Freq: 7T, dtype: int64
         |
         |  >>> ts.groupby(pd.Grouper(freq='17min')).sum()
         |  2000-10-01 23:14:00     0
         |  2000-10-01 23:31:00     9
         |  2000-10-01 23:48:00    21
         |  2000-10-02 00:05:00    54
         |  2000-10-02 00:22:00    24
         |  Freq: 17T, dtype: int64
         |
         |  >>> ts.groupby(pd.Grouper(freq='17min', origin='epoch')).sum()
         |  2000-10-01 23:18:00     0
         |  2000-10-01 23:35:00    18
         |  2000-10-01 23:52:00    27
         |  2000-10-02 00:09:00    39
         |  2000-10-02 00:26:00    24
         |  Freq: 17T, dtype: int64
         |
         |  >>> ts.groupby(pd.Grouper(freq='17min', origin='2000-01-01')).sum()
         |  2000-10-01 23:24:00     3
         |  2000-10-01 23:41:00    15
         |  2000-10-01 23:58:00    45
         |  2000-10-02 00:15:00    45
         |  Freq: 17T, dtype: int64
         |
         |  If you want to adjust the start of the bins with an `offset` Timedelta, the two
         |  following lines are equivalent:
         |
         |  >>> ts.groupby(pd.Grouper(freq='17min', origin='start')).sum()
         |  2000-10-01 23:30:00     9
         |  2000-10-01 23:47:00    21
         |  2000-10-02 00:04:00    54
         |  2000-10-02 00:21:00    24
         |  Freq: 17T, dtype: int64
         |
         |  >>> ts.groupby(pd.Grouper(freq='17min', offset='23h30min')).sum()
         |  2000-10-01 23:30:00     9
         |  2000-10-01 23:47:00    21
         |  2000-10-02 00:04:00    54
         |  2000-10-02 00:21:00    24
         |  Freq: 17T, dtype: int64
         |
         |  To replace the use of the deprecated `base` argument, you can now use `offset`,
         |  in this example it is equivalent to have `base=2`:
         |
         |  >>> ts.groupby(pd.Grouper(freq='17min', offset='2min')).sum()
         |  2000-10-01 23:16:00     0
         |  2000-10-01 23:33:00     9
         |  2000-10-01 23:50:00    36
         |  2000-10-02 00:07:00    39
         |  2000-10-02 00:24:00    24
         |  Freq: 17T, dtype: int64
         |
         |  Methods defined here:
         |
         |  __init__(self, key=None, level=None, freq=None, axis=0, sort=False, dropna=True)
         |      Initialize self.  See help(type(self)) for accurate signature.
         |
         |  __repr__(self) -> str
         |      Return repr(self).
         |
         |  ----------------------------------------------------------------------
         |  Static methods defined here:
         |
         |  __new__(cls, *args, **kwargs)
         |      Create and return a new object.  See help(type) for accurate signature.
         |
         |  ----------------------------------------------------------------------
         |  Data descriptors defined here:
         |
         |  __dict__
         |      dictionary for instance variables (if defined)
         |
         |  __weakref__
         |      list of weak references to the object (if defined)
         |
         |  ax
         |
         |  groups
         |
         |  ----------------------------------------------------------------------
         |  Data and other attributes defined here:
         |
         |  __annotations__ = {'_attributes': typing.Tuple[str, ...]}
        """

        # One-day step used to randomize the 'date' column below.
        day_offset = pd.offsets.DateOffset(days=1)

        # Names repeat on purpose: unlike pivot, pivot_table aggregates, so
        # duplicated (index, column) pairs are allowed.  Each row gets a
        # random date 1-4 days after 2010-01-01 (draws may collide).
        df = pd.DataFrame(
            {'name': ['Li', 'He', 'Wu', 'Wu', 'Li', 'He'],
             'age': [23, 32, 22, 35, 23, 31],
             'fee': [3000, 5000, 3000, 5000, 2000, 3000],
             'date': [pd.Timestamp(2010, 1, 1)+day_offset*np.random.randint(1, 5) for _ in range(6)]
             },
            # index=[pd.Timestamp(2010, 1, 1)+day_offset*np.random.randint(1, 5) for _ in range(6)]
        )
        # NOTE(review): the index is still a plain RangeIndex (0..5); naming
        # it 'date' duplicates the name of the 'date' COLUMN.  Presumably this
        # was meant for the commented-out datetime index above — confirm.
        df.index.name = 'date'

        # Each f-string section echoes a command and then its evaluated result.
        # NOTE(review): the sections relying on aggfunc=np.mean over the
        # non-numeric 'name'/'date' values depend on legacy pandas silently
        # dropping columns that cannot be aggregated; newer pandas versions
        # may raise instead — confirm the target pandas version.
        print(
            f" >>> df\n"
            f"{df}\n"

            f"# 设置索引、取值进行数据透视，使用np.mean进行聚合计算\n"
            f" >>> df.pivot_table(index=[df.index, 'name'], values=['age', 'fee'])\n"
            f"{df.pivot_table(index=[df.index, 'name'], values=['age', 'fee'])}\n"

            f"# 设置多个聚合函数进行数据计算. 不能进行聚合计算类型的列，自动去除\n"
            f" >>> df.pivot_table(index='name', values=['age', 'fee'], aggfunc=[max, min, np.mean])\n"
            f"{df.pivot_table(index='name', values=['age', 'fee'], aggfunc=[max, min, np.mean])}\n"

            f"# 设置margins=True，增加合计计算项，名称为margins_name。date列不适用与均值计算，不在结果中。\n"
            f" >>> df.pivot_table(index=['name', 'age'], values=['fee', 'date'], aggfunc=['count', max])\n"
            f"{df.pivot_table(index=['name', 'age'], values=['fee', 'date'], aggfunc=['count', max])}\n"

            f"# 如果数据类型不满足聚合计算，在数据透视表计算中自动去除\n"
            f" >>> df.pivot_table(index='fee', values=['name', 'age', 'date'], aggfunc=['count', max, np.mean])\n"
            f"{df.pivot_table(index='fee', values=['name', 'age', 'date'], aggfunc=['count', max, np.mean])}\n"

            f"# 设置margins=True，增加合计计算项，名称为margins_name\n"
            f" >>> df.pivot_table(index='name', values=['fee', 'age', 'date'], margins=True, margins_name='total')\n"
            f"{df.pivot_table(index='name', values=['fee', 'age', 'date'], margins=True, margins_name='total')}\n"
        )
        # Return the source frame (not any of the pivot tables).
        return df

    def st_pt_crosstab(self):
        """Demonstrate pd.crosstab: plain frequency counts, values/aggfunc
        aggregation, and the normalize parameter.

        Returns
        -------
        None
            Prints each ">>> command" followed by its actual result.
        """
        # Categorical sample columns plus one numeric column for the
        # values/aggfunc demonstrations.
        a = ['Apple', 'Pear', 'Apple', 'Pear', 'Apple', 'Apple', 'Pear', 'Apple', 'Pear', 'Pear']
        b = ['Big', 'Small', 'Big', 'Big', 'Small', 'Small', 'Big', 'Small', 'Small', 'Small']
        c = ['Red', 'Red', 'Green', 'Red', 'Red', 'Green', 'Red', 'Red', 'Green', 'Green']
        d = [20, 10, 15, 10, 8, 15, 12, 6, 50, 5]
        df = pd.DataFrame({'name': a, 'size': b, 'color': c, 'value': d})

        # data
        print(
            f"# data\n"
            f" >>> a = {a}\n"
            f" >>> b = {b}\n"
            f" >>> c = {c}\n"
            f" >>> d = {d}\n"
            " >>> df = pd.DataFrame({'name': a, 'size': b, 'color': c, 'value': d})\n"
            f"{df}\n"
        )

        # index, columns, row/colnames, values, aggfunc
        print(
            f"# set  index, columns, row/colnames, values, aggfunc\n"
            " >>> pd.crosstab(np.array(a), columns=[b, c], rownames=['name'], colnames=['size', 'color'])\n"
            f"{pd.crosstab(np.array(a), columns=[b, c], rownames=['name'], colnames=['size', 'color'])}\n"
            " >>> pd.crosstab(df.name, columns=[df['size'], df['color']], values=d, aggfunc=[np.sum], "
            "rownames=['name'], colnames=['size', 'color'])\n"
            # Executed call now matches the ">>>" command shown above; it
            # previously used np.array(a) while the command showed df.name.
            # Output is identical because rownames=['name'] overrides the
            # Series name either way.
            f"{pd.crosstab(df.name, columns=[df['size'], df['color']], values=d, aggfunc=[np.sum], rownames=['name'], colnames=['size', 'color'])}\n"
            " >>> pd.crosstab(np.array(a), columns=[b, c], values=d, aggfunc=[np.mean], "
            "rownames=['name'], colnames=['size', 'color'])\n"
            f"{pd.crosstab(np.array(a), columns=[b, c], values=d, aggfunc=[np.mean], rownames=['name'], colnames=['size', 'color'])}\n"
            " >>> pd.crosstab([np.array(a), np.array(b)], columns=[b, c], values=d, aggfunc=[np.mean])\n"
            f"{pd.crosstab([np.array(a), np.array(b)], columns=[b, c], values=d, aggfunc=[np.mean])}\n"
        )

        # normalize: 'index' -> rows sum to 1, 'columns' -> columns sum to 1,
        # 'all' -> whole table sums to 1.
        print(
            "# set normalize\n"
            " >>> pd.crosstab(np.array(a), columns=[b, c], normalize='index', rownames=['name'], colnames=['size', 'color'])\n"
            f"{pd.crosstab(np.array(a), columns=[b, c], normalize='index', rownames=['name'], colnames=['size', 'color'])}\n"
            " >>> pd.crosstab(np.array(a), columns=[b, c], normalize='columns', rownames=['name'], colnames=['size', 'color'])\n"
            f"{pd.crosstab(np.array(a), columns=[b, c], normalize='columns', rownames=['name'], colnames=['size', 'color'])}\n"
            " >>> pd.crosstab(np.array(a), columns=[b, c], normalize='all', rownames=['name'], colnames=['size', 'color'])\n"
            f"{pd.crosstab(np.array(a), columns=[b, c], normalize='all', rownames=['name'], colnames=['size', 'color'])}\n"
        )

        # margins + normalize: kept commented out deliberately — combining
        # margins=True with normalize='columns' and multi-level columns raises
        # "ValueError: Length of new names must be 1, got 2".
        # print(
        #     "# set margins, normalize\n"
        #     " >>> pd.crosstab(np.array(a), columns=[b, c], margins=True, rownames=['name'], colnames=['size', 'color'])\n"
        #     f"{pd.crosstab(np.array(a), columns=[b, c], margins=True, rownames=['name'], colnames=['size', 'color'])}\n"
        #     " >>> pd.crosstab(np.array(a), columns=[b, c], margins=True, normalize='index', rownames=['name'], colnames=['size', 'color'])\n"
        #     f"{pd.crosstab(np.array(a), columns=[b, c], margins=True, normalize='index', rownames=['name'], colnames=['size', 'color'])}\n"
        #     " >>> pd.crosstab(np.array(a), columns=[b], margins=True, normalize='columns', rownames=['name'], colnames=['size'])\n"
        #     f"{pd.crosstab(np.array(a), columns=[b], margins=True, normalize='columns', rownames=['name'], colnames=['size'])}\n"
        #     " >>> pd.crosstab(np.array(a), columns=[b, c], margins=True, normalize='columns', rownames=['name'], colnames=['size', 'color'])\n"
        #     "ValueError: Length of new names must be 1, got 2\n"
        #     # !!! f"{pd.crosstab(np.array(a), columns=[b, c], margins=True, normalize='columns')}\n"
        # )
    def st_mt_melt(self):
        """
        melt(self, id_vars=None, value_vars=None, var_name=None, value_name='value', col_level=None,
             ignore_index=True)
              -> 'DataFrame'

            Unpivot a DataFrame from wide to long format, optionally leaving identifiers set.

            This function is useful to massage a DataFrame into a format where one
            or more columns are identifier variables (`id_vars`), while all other
            columns, considered measured variables (`value_vars`), are "unpivoted" to
            the row axis, leaving just two non-identifier columns, 'variable' and
            'value'.

            .. versionadded:: 0.20.0

            Parameters
            ----------
            id_vars : tuple, list, or ndarray, optional
                Column(s) to use as identifier variables.
            value_vars : tuple, list, or ndarray, optional
                Column(s) to unpivot. If not specified, uses all columns that
                are not set as `id_vars`.
            var_name : scalar
                Name to use for the 'variable' column. If None it uses
                ``frame.columns.name`` or 'variable'.
            value_name : scalar, default 'value'
                Name to use for the 'value' column.
            col_level : int or str, optional
                If columns are a MultiIndex then use this level to melt.
            ignore_index : bool, default True
                If True, original index is ignored. If False, the original index is retained.
                Index labels will be repeated as necessary.

                .. versionadded:: 1.1.0

            Returns
            -------
            DataFrame
                Unpivoted DataFrame.

            See Also
            --------
            melt : Identical method.
            pivot_table : Create a spreadsheet-style pivot table as a DataFrame.
            DataFrame.pivot : Return reshaped DataFrame organized
                by given index / column values.
            DataFrame.explode : Explode a DataFrame from list-like
                    columns to long format.

            Examples
            --------
            >>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
            ...                    'B': {0: 1, 1: 3, 2: 5},
            ...                    'C': {0: 2, 1: 4, 2: 6}})
            >>> df
               A  B  C
            0  a  1  2
            1  b  3  4
            2  c  5  6

            >>> df.melt(id_vars=['A'], value_vars=['B'])
               A variable  value
            0  a        B      1
            1  b        B      3
            2  c        B      5

            >>> df.melt(id_vars=['A'], value_vars=['B', 'C'])
               A variable  value
            0  a        B      1
            1  b        B      3
            2  c        B      5
            3  a        C      2
            4  b        C      4
            5  c        C      6

            The names of 'variable' and 'value' columns can be customized:

            >>> df.melt(id_vars=['A'], value_vars=['B'],
            ...         var_name='myVarname', value_name='myValname')
               A myVarname  myValname
            0  a         B          1
            1  b         B          3
            2  c         B          5

            Original index values can be kept around:

            >>> df.melt(id_vars=['A'], value_vars=['B', 'C'], ignore_index=False)
               A variable  value
            0  a        B      1
            1  b        B      3
            2  c        B      5
            0  a        C      2
            1  b        C      4
            2  c        C      6

            If you have multi-index columns:

            >>> df.columns = [list('ABC'), list('DEF')]
            >>> df
               A  B  C
               D  E  F
            0  a  1  2
            1  b  3  4
            2  c  5  6

            >>> df.melt(col_level=0, id_vars=['A'], value_vars=['B'])
               A variable  value
            0  a        B      1
            1  b        B      3
            2  c        B      5

            >>> df.melt(id_vars=[('A', 'D')], value_vars=[('B', 'E')])
              (A, D) variable_0 variable_1  value
            0      a          B          E      1
            1      b          B          E      3
            2      c          B          E      5
        """

        # One-day step used to derive random demo dates from a base Timestamp.
        day_offset = pd.offsets.DateOffset(days=1)

        # Demo frame with a two-level row index; unstacking level 0 below
        # produces MultiIndex columns for the tuple/col_level examples.
        df = pd.DataFrame(
            {'name': ['Li', 'He', 'Wu', 'Su', 'Li', 'He'],
             'age': [23, 32, 22, 35, 23, 33],
             'fee': [3000, 5000, 3000, 5000, 2000, 3000],
             'date': [pd.Timestamp(2010, 1, 1)+day_offset*np.random.randint(1, 5) for _ in range(6)]
             },
            index=pd.MultiIndex.from_product([['a', 'b'], [1, 2, 3]])
        )
        # df1: index [1, 2, 3], MultiIndex columns (column name, former index level 'a'/'b').
        df1 = df.unstack(level=0)

        print(
            f" >>> df\n"
            f"{df}\n"

            f"# 仅定义id_vars，其余列名将作为variable的变量\n"
            f" >>> df.melt(id_vars=['age'])\n"
            f"{df.melt(id_vars=['age'])}\n"

            f"# 仅设置value_vars，没有分类列，variable中只包含所设置的列名值\n"
            f" >>> df.melt(value_vars=['age', 'fee'])\n"
            f"{df.melt(value_vars=['age', 'fee'])}\n"

            f"# 使用模块方法melt, 除设置数据集frame=df外，其余参数使用方法相同\n"
            f" >>> pd.melt(frame=df, id_vars=['name'], value_vars=['fee', 'date'])\n"
            f"{pd.melt(df, id_vars=['name'], value_vars=['fee', 'date'])}\n"

            f"{'-'*80}\n"
            f"# 多层列索引数据集\n"
            f" >>> df1\n"
            f"{df1}\n"

            f"# 设置标识列id_vars, 多层列索引将使用两个值列进行表示variable_0、variable_1\n"
            f" >>> df1.melt(id_vars=[('name', 'a')])\n"
            f"{df1.melt(id_vars=[('name', 'a')])}\n"

            f"# 设置value_vars限制所表示的列\n"
            f" >>> df1.melt(id_vars=[('name', 'a')], value_vars=[('fee', 'b'), ('date', 'a')])\n"
            f"{df1.melt(id_vars=[('name', 'a')], value_vars=[('fee', 'b'), ('date', 'a')])}\n"

            f"# 使用col_level，对多层次列索引分层熔化\n"
            # Displayed command fixed to reference df1 — the expression below
            # operates on df1, not df.
            f" >>> df1[[('name', 'a'), ('name', 'b')]].melt(col_level=1, id_vars=['a'], value_vars=['b'])\n"
            f"{df1[[('name', 'a'), ('name', 'b')]].melt(col_level=1, id_vars=['a'], value_vars=['b'])}\n"

            f"# 使用col_level，如果一个层次中有重复列名, 则不能熔化\n"
            f" >>> df1.melt(col_level=1, id_vars=['a'], value_vars=['b'])\n"
            # Kept commented: this call raises the KeyError quoted below
            # because level-1 labels 'a'/'b' are duplicated across columns.
            # f"{df1.melt(col_level=1, id_vars=['a'], value_vars=['b'])}\n"
            f"KeyError: None of [Index([('name', 'a'), ('name', 'b')], dtype='object')] are in the [columns]"
        )
        return df1
