# coding: utf-8

import numpy as np
import pandas as pd


class Demo:

    def __init__(self):
        self.df = pd.DataFrame(np.arange(60, 84).reshape(12, 2),
                               columns=['math', 'science'],
                               index=pd.MultiIndex.from_product(
                               [[1, 2, 3], ['t1', 't2', 't3', 't4']]),
                               dtype=np.uint8)

    def data_sort(self):
        data = [[i, j] for i, j in zip(range(60, 84, 2), range(85, 61, -2))]
        df = pd.DataFrame(data=data,
                          columns=['math', 'science'],
                          index=pd.MultiIndex.from_product(
                               [[1, 2, 3], ['t1', 't2', 't3', 't4']]),
                          dtype=np.uint8)
        df = df.unstack()
        print('=== Data Adjust ===\n'
              ' >>> df\n'
              f'{df}\n'
              " --- sort/add/drop/merge/concat ---\n")

        print(f"{'='*80}\n"
              "sort_index(axis=0, level=None, ascending=True, \n"
              "           inplace=False, kind='quicksort', na_position='last',\n"
              "           sort_remaining=True, by=None)")

        print(f"{'-'*80}\n"
              "df.sort_index(axis=0, ascending=False)\n"
              f"{df.sort_index(axis=0, ascending=False)}")

        print(f"f{'-'*80}\n"
              "df.sort_index(axis=1, level=1, ascending=False)\n"
              f"{df.sort_index(axis=1, level=1, ascending=False)}")

        print(f"{'-'*80}\n"
              "df.sort_index(axis=1, level=[0, 1], ascending=[False, True]))\n"
              f"{df.sort_index(axis=1, level=[0, 1], ascending=[False, True])}")

        print(f"{'-'*80}\n"
              "df.sort_index(axis=1, level=0, ascending=False, sort_remaining=False))\n"
              f"{df.sort_index(axis=1, level=[0, 1], ascending=[False, False], sort_remaining=False)}\n"
              "Invalid: sort_remaining fail")

        print(f"{'='*80}\n"
              "sort_values(by, axis=0, ascending=True, inplace=False, kind='quicksort', na_position='last')\n"
              f"{'-'*80}\n"
              " >>> df.sort_values(by=1, axis=1, ascending=False)\n"
              f"{df.sort_values(by=1, axis=1, ascending=False)}")

        df = df.applymap(lambda x: np.random.randint(50, 100))
        print(f"{'-'*80}\n"
              f" >>> df\n"
              f"{df}\n"
              " >>> df.sort_values(by=('math', 't3'), axis=0, ascending=False)\n"
              f"{df.sort_values(by=('math', 't3'), axis=0, ascending=False)}")

        return

    def data_add_loc(self):
        df = self.df.unstack()

        df1 = df.copy(deep=True)
        df1.loc[:, ('new_col', 't0')] = 100
        df2 = df1.copy()
        df2.loc[0, :] = range(9)
        print(
            f"{'-'*80}\n"
            " >>> df\n"
            f"{df}\n"
            
            f"{'-'*80}\n"
            f"# 增加列数据\n"
            " >>> df.loc[:, ('new_col', 't0')] = 100\n"
            f"{df1}\n"
            
            f"{'-'*80}\n"
            f"# 增加行数据\n"
            " >>> df.loc[4, :] = range(9)\n"
            f"{df2}\n"

            f"{'-'*80}\n"
            )

    def data_add_insert(self):
        print(f"{'='*80}")
        df = self.df.filter(regex='\([12].*[12]', axis=0)
        print("df")
        print(df)

        print(f"{'-'*80}")
        print("df.insert(loc=1, column='english', value=80)")
        df.insert(loc=1, column='english', value=80)
        print(df)

        print(f"{'-'*80}")
        print("df.insert(loc=1, column='math', value=80, allow_duplicates=True)")
        df.insert(loc=1, column='math', value=80, allow_duplicates=True)
        print(df)

        print(f"{'='*80}")
        df = self.df.filter(regex='\([12].*[12]', axis=0)
        print("df")
        print(df)

        print(
            f"{'='*80}\n"
            "赋值告警问题：SettingWithCopyWarning， 视图链赋值的不确定性！\n"
            f"{'-'*80}\n"
            ".loc赋值： df.loc[:, 'english'] = 100"
            )
        try:
            df.loc[:, 'english'] = 100
        except Warning as e:
            print(e)
        print(df)

        print(
            f"{'-'*80}"
            "无警告.loc赋值：\n"
            " >>> df1 = df.copy()\n"
            " >>> df1.loc[: 'english'] = 120\n"
            )
        df1 = df.copy(True)
        df1.loc[:, 'english'] = 120
        print(df1)

        # Strange, erratic, odd Situation !!!
        # no problem, only dtype = uint8
        # df.index = [chr(65+j) for j in range(4)]
        # df[3:6] = 200
        # df1 = df.copy(deep=True)
        # df[3:6] = 50
        # df.iloc[3:6, :] = 100
        # print(df)
        print(
            f"{'-'*80}\n"
            "# 直接使用切片赋值, Pandas给出警告: SettingWithCopyWarning\n"
            " >>> df[3:6] = 200\n"
            f"{'' if exec('df[3:6]=200') else ''}"
            f"{df}\n"

            f"{'-'*80}\n"
            "# 拷贝后进行切片赋值，无警告信息\n"
            " >>> df1 = df.copy()\n"
            f"{'' if exec('df1 = df.copy()') else ''}"
            f"{df1}\n"
            " >>> df1[3:6] = 123\n"
            f"{'' if exec('df1[0:2] = 123') else ''}"
            " >>> df = df1\n"
            f"{'' if exec('df = df1') else ''}"
            f"{df1}")

        return df1

    def data_add_assign(self):
        """
        assign(self, **kwargs) -> 'DataFrame'
            Assign new columns to a DataFrame.

            Returns a new object with all original columns in addition to new ones.
            Existing columns that are re-assigned will be overwritten.

            Parameters
            ----------
            **kwargs : dict of {str: callable or Series}
                The column names are keywords. If the values are
                callable, they are computed on the DataFrame and
                assigned to the new columns. The callable must not
                change input DataFrame (though pandas doesn't check it).
                If the values are not callable, (e.g. a Series, scalar, or array),
                they are simply assigned.

            Returns
            -------
            DataFrame
                A new DataFrame with the new columns in addition to
                all the existing columns.

            Notes
            -----
            Assigning multiple columns within the same ``assign`` is possible.
            Later items in '\*\*kwargs' may refer to newly created or modified
            columns in 'df'; items are computed and assigned into 'df' in order.

            Examples
            --------
            >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
            ...                   index=['Portland', 'Berkeley'])
            >>> df
                      temp_c
            Portland    17.0
            Berkeley    25.0

            Where the value is a callable, evaluated on `df`:

            >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
                      temp_c  temp_f
            Portland    17.0    62.6
            Berkeley    25.0    77.0

            Alternatively, the same behavior can be achieved by directly
            referencing an existing Series or sequence:

            >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
                      temp_c  temp_f
            Portland    17.0    62.6
            Berkeley    25.0    77.0

            You can create multiple columns within the same assign where one
            of the columns depends on another one defined within the same assign:

            >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
            ...           temp_k=lambda x: (x['temp_f'] +  459.67) * 5 / 9)
                      temp_c  temp_f  temp_k
            Portland    17.0    62.6  290.15
            Berkeley    25.0    77.0  298.15
        """
        df = self.df.iloc[0:5]
        df.index = range(5)
        print(
            " >>> df\n"
            f"{df}\n"
            f" >>> df.assign(new_col=100)\n"
            f" {df.assign(new_col=100)}\n"
            " >>> df.assign(new_col=df['math']*1.2)\n"
            f"{df.assign(math2=df['math']*1.2)}\n"
            f" >>> df.assign(total=lambda x: x.math + x.science,\n"
            f"               zhf=lambda x: x.math*0.7+x.science*0.3)\n"
            f"{df.assign(zf=lambda x: x.math + x.science, zhf=lambda x: x.math*0.7+x.science*0.3)}\n"
        )

    def data_add_insert2(self):
        """
        insert(self, loc, column, value, allow_duplicates=False) -> None
            Insert column into DataFrame at specified location.

            Raises a ValueError if `column` is already contained in the DataFrame,
            unless `allow_duplicates` is set to True.

            Parameters
            ----------
            loc : int
                Insertion index. Must verify 0 <= loc <= len(columns).
            column : str, number, or hashable object
                Label of the inserted column.
            value : int, Series, or array-like
            allow_duplicates : bool, optional
        """

        df = self.df

        print(
            " >>> df\n"
            f"{df}\n"

            "# 在位置1插入列'col-2', 并对该列赋值100\n"
            " >>> df.insert(loc=1, column='col-2', value=100)\n"
            f"{df.insert(loc=1, column='col-2', value=100)}\n"
            f"{df}\n"

            "# 在位置1插入列'col-2', 并对该列赋值100\n"
            " >>> df.insert(loc=1, column='C', value=200, allow_duplicates=True)\n"
            f"{df.insert(loc=1, column='C', value=200, allow_duplicates=True)}\n"
            f"{df}\n"

            "# 删除重复列数据\n"
            " >>> valid_columns = [True, True, True, True, False]\n"
            " >>> df.loc[:, valid_columns]\n"
            f"{df.loc[:, [True] * 4 + [False]]}"
        )
        return df

    def data_add_append(self):
        # append method for DataFrame
        """
        append(self, other, ignore_index=False, verify_integrity=False, sort=False)

            -> 'DataFrame'

            Append rows of `other` to the end of caller, returning a new object.

            Columns in `other` that are not in the caller are added as new columns.

            Parameters
            ----------
            other : DataFrame or Series/dict-like object, or list of these
                The data to append.
            ignore_index : bool, default False
                If True, the resulting axis will be labeled 0, 1, …, n - 1.
            verify_integrity : bool, default False
                If True, raise ValueError on creating index with duplicates.
            sort : bool, default False
                Sort columns if the columns of `self` and `other` are not aligned.

                .. versionadded:: 0.23.0
                .. versionchanged:: 1.0.0
                    Changed to not sort by default.

            Returns
            -------
            DataFrame

            See Also
            --------
            concat : General function to concatenate DataFrame or Series objects.

            Notes
            -----
            If a list of dict/series is passed and the keys are all contained in
            the DataFrame's index, the order of the columns in the resulting
            DataFrame will be unchanged.

            Iteratively appending rows to a DataFrame can be more computationally
            intensive than a single concatenate. A better solution is to append
            those rows to a list and then concatenate the list with the original
            DataFrame all at once.
            Examples
            --------
            >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
            >>> df
               A  B
            0  1  2
            1  3  4
            >>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
            >>> df.append(df2)
               A  B
            0  1  2
            1  3  4
            0  5  6
            1  7  8

            With `ignore_index` set to True:

            >>> df.append(df2, ignore_index=True)
               A  B
            0  1  2
            1  3  4
            2  5  6
            3  7  8

            The following, while not recommended methods for generating DataFrames,
            show two ways to generate a DataFrame from multiple data sources.

            Less efficient:

            >>> df = pd.DataFrame(columns=['A'])
            >>> for i in range(5):
            ...     df = df.append({'A': i}, ignore_index=True)
            >>> df
               A
            0  0
            1  1
            2  2
            3  3
            4  4

            More efficient:

            >>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
            ...           ignore_index=True)
               A
            0  0
            1  1
            2  2
            3  3
            4  4

        """
        df1 = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
        df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
        df3 = pd.DataFrame([[5, 6, 9], [7, 8, 10]], columns=list('ABC'))
        # return df1, df3
        print(
            f"{'='*80}\n"
            f"# 对DataFrame数据集使用append方法添加数据\n"
            " >>> df1\n"
            f"{df1}\n"
            " >>> df2\n"
            f"{df2}\n"
            " >>> df3\n"
            f"{df3}\n"
            " >>> df1.append(df2)\n"
            f"{df1.append(df2)}"
            " >>> df1.append(df3)\n"
            f"{df1.append(df3)}"
        )


        # append method for Series
        """
        append(self, to_append, ignore_index=False, verify_integrity=False)
            Concatenate two or more Series.
        
            Parameters
            ----------
            to_append : Series or list/tuple of Series
                Series to append with self.
            ignore_index : bool, default False
                If True, the resulting axis will be labeled 0, 1, …, n - 1.
            verify_integrity : bool, default False
                If True, raise Exception on creating index with duplicates.
        
            Returns
            -------
            Series
                Concatenated Series.
        
            See Also
            concat : General function to concatenate DataFrame or Series objects.
        
            Notes
            -----
            Iteratively appending to a Series can be more computationally intensive
            than a single concatenate. A better solution is to append values to a
            list and then concatenate the list with the original Series all at
            once.
        
            Examples
            --------
            >>> s1 = pd.Series([1, 2, 3])
            >>> s2 = pd.Series([4, 5, 6])
            >>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])
            >>> s1.append(s2)
            0    1
            1    2
            2    3
            0    4
            1    5
            2    6
            --------
            dtype: int64
        
            >>> s1.append(s3)
            0    1
            1    2
            2    3
            3    4
            4    5
            5    6
            dtype: int64
        
            With `ignore_index` set to True:
        
            >>> s1.append(s2, ignore_index=True)
            0    1
            1    2
            2    3
            3    4
            4    5
            5    6
            dtype: int64
        
            With `verify_integrity` set to True:
        
            >>> s1.append(s2, verify_integrity=True)
            Traceback (most recent call last):
            ...
            ValueError: Indexes have overlapping values: [0, 1, 2]
        """
        s1 = pd.Series([1, 2, 3])
        s2 = pd.Series([4, 5, 6])
        s3 = pd.Series([5, 7, 9], index=[3, 4, 5])
        s4 = pd.Series([6, 7, 8], index=[6, 7, 8])
        print(
            f"{'='*80}\n"
            f"# 对Series数据集使用append方法添加数据"
            " >>> s1 = pd.Series([1, 2, 3])\n"
            " >>> s2 = pd.Series([4, 5, 6])\n"
            " >>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5])\n"
            " >>> s1.append(s2)     # 将说s2添加到s1的尾部\n"
            f"{s1.append(s2)}\n"
            " >>> s1.append([s2, s3])   # 将s2、s3添加到s1的尾部，忽略索引重复\n"
            f"{s1.append([s2, s3], ignore_index=True)}"
            )
        print(" >>> s1.append([s2, s3], verify_integrity=True)")
        print(f"{s1.append([s2, s3], verify_integrity=True)}")
        return

    def data_del_drop(self):
        """
        Demo of Series.drop / DataFrame.drop on a project MultiIndex dataset.

        drop(self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise') -> 'Series'
            Return Series with specified index labels removed.

            Remove elements of a Series based on specifying the index labels.
            When using a multi-index, labels on different levels can be removed
            by specifying the level.

            Parameters
            ----------
            labels : single label or list-like
                Index labels to drop.
            axis : 0, default 0
                Redundant for application on Series.
            index : single label or list-like
                Redundant for application on Series, but 'index' can be used instead
                of 'labels'.
            columns : single label or list-like
                No change is made to the Series; use 'index' or 'labels' instead.
            level : int or level name, optional
                For MultiIndex, level for which the labels will be removed.
            inplace : bool, default False
                If True, do operation inplace and return None.
            errors : {'ignore', 'raise'}, default 'raise'
                If 'ignore', suppress error and only existing labels are dropped.

            Returns
            -------
            Series
                Series with specified index labels removed.

            Raises
            ------
            KeyError
                If none of the labels are found in the index.

            See Also
            --------
            Series.reindex : Return only specified index labels of Series.
            Series.dropna : Return series without null values.
            Series.drop_duplicates : Return Series with duplicate values removed.
            DataFrame.drop : Drop specified labels from rows or columns.

            Examples
            --------
            >>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C'])
            >>> s
            A  0
            B  1
            C  2
            dtype: int64

            Drop labels B and C

            >>> s.drop(labels=['B', 'C'])
            A  0
            dtype: int64

            Drop 2nd level label in MultiIndex Series

            >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
            ...                              ['speed', 'weight', 'length']],
            ...                      codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
            ...                             [0, 1, 2, 0, 1, 2, 0, 1, 2]])
            >>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3],
            ...               index=midx)
            >>> s
            lama    speed      45.0
                    weight    200.0
                    length      1.2
            cow     speed      30.0
                    weight    250.0
                    length      1.5
            falcon  speed     320.0
                    weight      1.0
                    length      0.3
            dtype: float64

            >>> s.drop(labels='weight', level=1)
            lama    speed      45.0
                    length      1.2
            cow     speed      30.0
                    length      1.5
            falcon  speed     320.0
                    length      0.3
            dtype: float64
        """

        # drop method for DataFrame (reference doc kept as a bare string literal,
        # which is a no-op statement used purely as in-source documentation)
        """
        drop(self, labels=None, axis=0, index=None, columns=None, level=None, inplace=False, errors='raise')
            Drop specified labels from rows or columns.
        
            Remove rows or columns by specifying label names and corresponding
            axis, or by specifying directly index or column names. When using a
            multi-index, labels on different levels can be removed by specifying
            the level.
        
            Parameters
            ----------
            labels : single label or list-like
                Index or column labels to drop.
            axis : {0 or 'index', 1 or 'columns'}, default 0
                Whether to drop labels from the index (0 or 'index') or
                columns (1 or 'columns').
            index : single label or list-like
                Alternative to specifying axis (``labels, axis=0``
                is equivalent to ``index=labels``).
            columns : single label or list-like
                Alternative to specifying axis (``labels, axis=1``
                is equivalent to ``columns=labels``).
            level : int or level name, optional
                For MultiIndex, level from which the labels will be removed.
            inplace : bool, default False
                If False, return a copy. Otherwise, do operation
                inplace and return None.
            errors : {'ignore', 'raise'}, default 'raise'
                If 'ignore', suppress error and only existing labels are
                dropped.
        
            Returns
            -------
            DataFrame
                DataFrame without the removed index or column labels.
        
            Raises
            ------
            KeyError
                If any of the labels is not found in the selected axis.
        
            See Also
            --------
            DataFrame.loc : Label-location based indexer for selection by label.
            DataFrame.dropna : Return DataFrame with labels on given axis omitted
                where (all or any) data are missing.
            DataFrame.drop_duplicates : Return DataFrame with duplicate rows
                removed, optionally only considering certain columns.
            Series.drop : Return Series with specified index labels removed.
        
            Examples
            --------
            >>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
            ...                   columns=['A', 'B', 'C', 'D'])
            >>> df
               A  B   C   D
            0  0  1   2   3
            1  4  5   6   7
            2  8  9  10  11
        
            Drop columns
        
             >>> df.drop(['B', 'C'], axis=1)
               A   D
            0  0   3
            1  4   7
            2  8  11
        
            >>> df.drop(columns=['B', 'C'])
               A   D
            0  0   3
            1  4   7
            2  8  11
        
            Drop a row by index
        
            >>> df.drop([0, 1])
               A  B   C   D
            2  8  9  10  11
        
            Drop columns and/or rows of MultiIndex DataFrame
        
            >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
            ...                              ['speed', 'weight', 'length']],
            ...                      codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
            ...                             [0, 1, 2, 0, 1, 2, 0, 1, 2]])
            >>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
            ...                   data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
            ...                         [250, 150], [1.5, 0.8], [320, 250],
            ...                         [1, 0.8], [0.3, 0.2]])
            >>> df
                            big     small
            lama    speed   45.0    30.0
                    weight  200.0   100.0
                    length  1.5     1.0
            cow     speed   30.0    20.0
                    weight  250.0   150.0
                    length  1.5     0.8
            falcon  speed   320.0   250.0
                    weight  1.0     0.8
                    length  0.3     0.2
        
            >>> df.drop(index='cow', columns='small')
                            big
            lama    speed   45.0
                    weight  200.0
                    length  1.5
            falcon  speed   320.0
                    weight  1.0
                    length  0.3
        
            >>> df.drop(index='length', level=1)
                            big     small
            lama    speed   45.0    30.0
                    weight  200.0   100.0
            cow     speed   30.0    20.0
                    weight  250.0   150.0
            falcon  speed   320.0   250.0
                    weight  1.0     0.8
       """
        # NOTE(review): project-local dataset.  DataSet.df5 is presumably a
        # (group, 't*') MultiIndex score frame with 'math'/'science' columns,
        # given the labels dropped below -- confirm in ch4data.sec0_datasets.
        from ch4data.sec0_datasets import DataSet
        df = DataSet.df5
        # DataFrame.info() prints directly and returns None, so df_info is
        # always None and the f"{df_info}" line below prints the text 'None'.
        df_info = df.info()

        def try_text(data):
            # Drop labels that are absent on level 1, to capture and return
            # the KeyError message for display (returns None if no error).
            try:
                data.drop(index=['1', 't5'], level=1)
            except KeyError as e:
                return 'keyError: ' + str(e)

        print(
            f"{'='*80}\n"
            " >>> df\n"
            f"{df}\n"
            f"{df_info}\n")

        # Each f-string expression below executes the drop shown in the
        # caption directly above it; drop() returns a new frame, so df itself
        # is never modified.
        drop_demo = (
            "# 删除行数据\n"

            "# 删除行索引标签为1、3的行\n"
            " >>> df.drop(labels=[1, 3], axis=0)\n"
            f"{df.drop(labels=[1, 3], axis=0)}\n"

            " >>> df.drop(labels=['t2', 't4'], axis=0, level=1)\n"
            f"{df.drop(labels=['t2', 't4'], axis=0, level=1)}\n"

            f" {'-'*80}\n"
            "# 删除列数据\n"

            f"# 删除列名为‘math’的列\n"
            " >>> df.drop(index=1, columns='math')\n"
            f"{df.drop(index=1, columns='math')}\n"

            f" {'-'*80}\n"
            "# 删除行列数据\n"
            " >>> df.drop(index=[(1, 't1'), (2, 't2')], columns='math')\n"
            f"{df.drop(index=[(1, 't1'), (2, 't2')], columns='math')}\n"

            f"{'-'*80}\n"
            "# 索引标签都不存在时， 会触发异常KeyError\n"
            "# raise Error when all index values not in index\n"
            " >>> df.drop(index=['1', 't5'], level=1)\n"
            f"{try_text(df)}\n"

            f"{'-'*80}\n"
            "# 设置errors参数为‘ignore’，可以抑制异常 use errors to suppress Error\n"
            " >>> df.drop(index=['1', 't5'], level=1, errors='ignore')\n"
            f"{df.drop(index=['1', 't5'], level=1, errors='ignore')}\n"

            # f"{'-'*80}\n"
            # "# 部分标签值有效，只删除有效标签的数据，不会触发异常 ??? \n"
            # " partial index values are valid, no Error raised\n"
            # " >>> df.drop(index=['t2', 't5'], level=1)\n"
            # f"{df.drop(index=['t2', 't5'], level=1)}\n"
            )
        print(drop_demo)

        return

    def data_del_drop_duplicates(self):
        # drop_duplicates for Series
        """
        drop_duplicates(self, keep='first', inplace=False)

            -> Union[ForwardRef('Series'), NoneType]

            Return Series with duplicate values removed.

            Parameters
            ----------
            keep : {'first', 'last', ``False``}, default 'first'
                Method to handle dropping duplicates:

                - 'first' : Drop duplicates except for the first occurrence.
                - 'last' : Drop duplicates except for the last occurrence.
                - ``False`` : Drop all duplicates.

            inplace : bool, default ``False``
                If ``True``, performs operation inplace and returns None.

            Returns
            -------
            Series
                Series with duplicates dropped.

            See Also
            --------
            Index.drop_duplicates : Equivalent method on Index.
            DataFrame.drop_duplicates : Equivalent method on DataFrame.
            Series.duplicated : Related method on Series, indicating duplicate
                Series values.

            Examples
            --------
            Generate a Series with duplicated entries.

            >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'],
            ...               name='animal')
            >>> s
            0      lama
            1       cow
            2      lama
            3    beetle
            4      lama
            5     hippo
            Name: animal, dtype: object

            With the 'keep' parameter, the selection behaviour of duplicated values
            can be changed. The value 'first' keeps the first occurrence for each
            set of duplicated entries. The default value of keep is 'first'.

            >>> s.drop_duplicates()
            0      lama
            1       cow
            3    beetle
            5     hippo
            Name: animal, dtype: object

            The value 'last' for parameter 'keep' keeps the last occurrence for
            each set of duplicated entries.

            >>> s.drop_duplicates(keep='last')
            1       cow
            3    beetle
            4      lama
            5     hippo
            Name: animal, dtype: object

        :return:
        """
        sr = pd.Series([1, 1, 2, 2, 3],
                       index=[0, 0, 1, 2, 3])
        print(
            f"{'='*80}\n"
            "# Series with duplicate data\n"
            f" >>> sr\n"
            f"{sr}\n"
            " >>> sr.drop_duplicates()\n"
            f"{sr.drop_duplicates()}\n"
            f" >>> sr.drop_duplicates(keep='first')\n"
            f"{sr.drop_duplicates(keep='first')}"
        )

        # drop_duplicates for DataFrame
        """
        drop_duplicates(self, subset: Union[Hashable, Sequence[Hashable], NoneType] = None, 
            keep: Union[str, bool] = 'first', inplace: bool = False, ignore_index: bool = False) 
            
            -> Union[ForwardRef('DataFrame'), NoneType]
            
            Return DataFrame with duplicate rows removed.
        
            Considering certain columns is optional. Indexes, including time indexes
            are ignored.
        
            Parameters
            ----------
            subset : column label or sequence of labels, optional
                Only consider certain columns for identifying duplicates, by
                default use all of the columns.
            keep : {'first', 'last', False}, default 'first'
                Determines which duplicates (if any) to keep.
                - ``first`` : Drop duplicates except for the first occurrence.
                - ``last`` : Drop duplicates except for the last occurrence.
                - False : Drop all duplicates.
            inplace : bool, default False
                Whether to drop duplicates in place or to return a copy.
            ignore_index : bool, default False
                If True, the resulting axis will be labeled 0, 1, …, n - 1.
        
                .. versionadded:: 1.0.0
        
            Returns
            -------
            DataFrame
                DataFrame with duplicates removed or None if ``inplace=True``.
        
            See Also
            --------
            DataFrame.value_counts: Count unique combinations of columns.
        
            Examples
            --------
            Consider dataset containing ramen rating.
        
            >>> df = pd.DataFrame({
            ...     'brand': ['Yum Yum', 'Yum Yum', 'Indomie', 'Indomie', 'Indomie'],
            ...     'style': ['cup', 'cup', 'cup', 'pack', 'pack'],
            ...     'rating': [4, 4, 3.5, 15, 5]
            ... })
            >>> df
                brand style  rating
            0  Yum Yum   cup     4.0
            1  Yum Yum   cup     4.0
            2  Indomie   cup     3.5
            3  Indomie  pack    15.0
            4  Indomie  pack     5.0
        
            By default, it removes duplicate rows based on all columns.
        
            >>> df.drop_duplicates()
                brand style  rating
            0  Yum Yum   cup     4.0
            2  Indomie   cup     3.5
            3  Indomie  pack    15.0
            4  Indomie  pack     5.0
        
            To remove duplicates on specific column(s), use ``subset``.
        
            >>> df.drop_duplicates(subset=['brand'])
                 brand style  rating
            0  Yum Yum   cup     4.0
            2  Indomie   cup     3.5
        
            To remove duplicates and keep last occurences, use ``keep``.
        
            >>> df.drop_duplicates(subset=['brand', 'style'], keep='last')
                brand style  rating
            1  Yum Yum   cup     4.0
            2  Indomie   cup     3.5
            4  Indomie  pack     5.0
        """
        return

    def data_tr_applymap(self):
        """Demonstrate element-wise transformation with ``DataFrame.applymap``.

        Builds a small wide DataFrame containing NaN cells (via drop/unstack
        of ``self.df``) and prints how ``applymap`` behaves with and without
        ``na_action='ignore'``.

        Reference (pandas) signature::

        applymap(self, func, na_action: 'Optional[str]' = None)
            -> 'DataFrame'

            Apply a function to a Dataframe elementwise.

            This method applies a function that accepts and returns a scalar
            to every element of a DataFrame.

            Parameters
            ----------
            func : callable
                Python function, returns a single value from a single value.
            na_action : {None, 'ignore'}, default None
                If 'ignore', propagate NaN values, without passing them to func.

                .. versionadded:: 1.2

            Returns
            -------
            DataFrame
                Transformed DataFrame.

            See Also
            --------
            DataFrame.apply : Apply a function along input axis of DataFrame.

            Examples
            --------
            >>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
            >>> df
                   0      1
            0  1.000  2.120
            1  3.356  4.567

            >>> df.applymap(lambda x: len(str(x)))
               0  1
            0  3  4
            1  5  5

            Like Series.map, NA values can be ignored:

            >>> df_copy = df.copy()
            >>> df_copy.iloc[0, 0] = pd.NA
            >>> df_copy.applymap(lambda x: len(str(x)), na_action='ignore')
                  0  1
            0  <NA>  4
            1     5  5

            Note that a vectorized version of `func` often exists, which will
            be much faster. You could square each number elementwise.

            >>> df.applymap(lambda x: x**2)
                       0          1
            0   1.000000   4.494400
            1  11.262736  20.857489

            But it's better to avoid applymap in that case.

            >>> df ** 2
                       0          1
            0   1.000000   4.494400
            1  11.262736  20.857489
        """
        # Drop the 't3'/'t4' rows on index level 1, then one more single row,
        # so that the later unstack leaves holes (NaN cells) in the wide form.
        df = self.df.drop(index=['t3', 't4'], level=1, axis=0)
        df = df.drop(labels=(2, 't2'))
        # Pivot level-1 labels into columns; missing (row, label) combinations
        # become NaN, which is exactly what the applymap demos below exercise.
        df = df.unstack()

        # NOTE(review): DataFrame.applymap is renamed to DataFrame.map in
        # pandas >= 2.1; kept as-is here to match the documented API above.
        print(
            f" >>> df\n"
            f"{df}\n"

            f"# 存在NaN值时，可以设置忽略NaN值处理，否则会触发异常：\n"
            f" >>> df.applymap(np.int)\n"
            # Deliberately left commented out: converting NaN raises, and
            # np.int itself was removed from modern NumPy. The expected error
            # is shown as literal text instead.
            # f"{df.applymap(np.int)}\n"
            f" ......\n"
            f" ValueError: cannot convert float NaN to integer\n"

            f"# 设置忽略NaN值处理，不会触发异常，NaN值未作处理\n"
            f" >>> df.applymap(np.sqrt, na_action='ignore')\n"
            f"{df.applymap(np.sqrt, na_action='ignore')}\n"

            f" >>> df.applymap(lambda x: 0 if np.isnan(x) else int(x))\n"
            f"{df.applymap(lambda x: 0 if np.isnan(x) else int(x))}"
        )

        # Reference (pandas) documentation for the related Series.map method,
        # kept here for comparison with applymap above.
        """
        map(self, arg, na_action=None) -> 'Series'

            Map values of Series according to input correspondence.

            Used for substituting each value in a Series with another value,
            that may be derived from a function, a ``dict`` or
            a :class:`Series`.

            Parameters
            ----------
            arg : function, collections.abc.Mapping subclass or Series
                Mapping correspondence.
            na_action : {None, 'ignore'}, default None
                If 'ignore', propagate NaN values, without passing them to the
                mapping correspondence.

            Returns
            -------
            Series
                Same index as caller.

            See Also
            --------
            Series.apply : For applying more complex functions on a Series.
            DataFrame.apply : Apply a function row-/column-wise.
            DataFrame.applymap : Apply a function elementwise on a whole DataFrame.

            Notes
            -----
            When ``arg`` is a dictionary, values in Series that are not in the
            dictionary (as keys) are converted to ``NaN``. However, if the
            dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
            provides a method for default values), then this default is used
            rather than ``NaN``.

            Examples
            --------
            >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
            >>> s
            0      cat
            1      dog
            2      NaN
            3   rabbit
            dtype: object

            ``map`` accepts a ``dict`` or a ``Series``. Values that are not found
            in the ``dict`` are converted to ``NaN``, unless the dict has a default
            value (e.g. ``defaultdict``):

            >>> s.map({'cat': 'kitten', 'dog': 'puppy'})
            0   kitten
            1    puppy
            2      NaN
            3      NaN
            dtype: object

            It also accepts a function:

            >>> s.map('I am a {}'.format)
            0       I am a cat
            1       I am a dog
            2       I am a nan
            3    I am a rabbit
            dtype: object

            To avoid applying the function to missing values (and keep them as
            ``NaN``) ``na_action='ignore'`` can be used:

            >>> s.map('I am a {}'.format, na_action='ignore')
            0     I am a cat
            1     I am a dog
            2            NaN
            3  I am a rabbit
            dtype: object
        """

        return

    def data_tr_apply(self):
        """Demonstrate ``DataFrame.apply`` and ``Series.apply``.

        Builds a small wide DataFrame from ``self.df`` and prints the effect
        of ``apply`` with scalar, aggregating and vector-returning functions,
        the various ``result_type`` settings, positional/keyword argument
        passing, and finally the ``Series.apply`` counterpart.
        """

        # df.apply — reference (pandas) documentation.
        """
        apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds)
            Apply a function along an axis of the DataFrame.

            Objects passed to the function are Series objects whose index is
            either the DataFrame's index (``axis=0``) or the DataFrame's columns
            (``axis=1``). By default (``result_type=None``), the final return type
            is inferred from the return type of the applied function. Otherwise,
            it depends on the `result_type` argument.

            Parameters
            ----------
            func : function
                Function to apply to each column or row.
            axis : {0 or 'index', 1 or 'columns'}, default 0
                Axis along which the function is applied:

                * 0 or 'index': apply function to each column.
                * 1 or 'columns': apply function to each row.

            raw : bool, default False
                Determines if row or column is passed as a Series or ndarray object:

                * ``False`` : passes each row or column as a Series to the
                  function.
                * ``True`` : the passed function will receive ndarray objects
                  instead.
                  If you are just applying a NumPy reduction function this will
                  achieve much better performance.

            result_type : {'expand', 'reduce', 'broadcast', None}, default None
                These only act when ``axis=1`` (columns):
                * 'expand' : list-like results will be turned into columns.
                * 'reduce' : returns a Series if possible rather than expanding
                  list-like results. This is the opposite of 'expand'.
                * 'broadcast' : results will be broadcast to the original shape
                  of the DataFrame, the original index and columns will be
                  retained.

                The default behaviour (None) depends on the return value of the
                applied function: list-like results will be returned as a Series
                of those. However if the apply function returns a Series these
                are expanded to columns.

            args : tuple
                Positional arguments to pass to `func` in addition to the
                array/series.
            **kwds
                Additional keyword arguments to pass as keywords arguments to
                `func`.

            Returns
            -------
            Series or DataFrame
                Result of applying ``func`` along the given axis of the
                DataFrame.

            See Also
            --------
            DataFrame.applymap: For elementwise operations.
            DataFrame.aggregate: Only perform aggregating type operations.
            DataFrame.transform: Only perform transforming type operations.

            Examples
            --------
            >>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
            >>> df
               A  B
            0  4  9
            1  4  9
            2  4  9

            Using a numpy universal function (in this case the same as
            ``np.sqrt(df)``):

            >>> df.apply(np.sqrt)
                 A    B
            0  2.0  3.0
            1  2.0  3.0
            2  2.0  3.0

            Using a reducing function on either axis

            >>> df.apply(np.sum, axis=0)
            A    12
            B    27
            dtype: int64

            >>> df.apply(np.sum, axis=1)
            0    13
            1    13
            2    13
            dtype: int64

            Returning a list-like will result in a Series

            >>> df.apply(lambda x: [1, 2], axis=1)
            0    [1, 2]
            1    [1, 2]
            2    [1, 2]
            dtype: object

            Passing ``result_type='expand'`` will expand list-like results
            to columns of a Dataframe

            >>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
               0  1
            0  1  2
            1  1  2
            2  1  2

            Returning a Series inside the function is similar to passing
            ``result_type='expand'``. The resulting column names
            will be the Series index.

            >>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
               foo  bar
            0    1    2
            1    1    2
            2    1    2

            Passing ``result_type='broadcast'`` will ensure the same shape
            result, whether list-like or scalar is returned by the function,
            and broadcast it along the axis. The resulting column names will
            be the originals.

            >>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
               A  B
            0  1  2
            1  1  2
            2  1  2
        """

        # Keep only the 't1'/'t2' rows of index level 1, then pivot level 1
        # into columns so each remaining row carries four values.
        df = self.df
        df = df.drop(labels=['t3', 't4'], level=1)
        df = df.unstack()

        def fexp(x, c, d=3):
            # Toy function used below to show passing args/kwargs via apply.
            return x + c + d

        print(
            f" >>> df\n"
            f"{df}\n"

            f"{'=' * 80}\n"
            f"\n # 使用自定义函数，传递位置和关键字参数\n"
            f" >>> def fexp(x, c, d=3):\n"
            f"         return x + c + d\n"
            f" >>> df.apply(fexp, axis=0, args=(20,), d=5)\n"
            f"{df.apply(fexp, axis=0, args=(20,), d=5)}\n"

            f"{'=' * 80}\n"
            f"# 传递的函数可以分为标量、聚合和向量三种类型:\n"
            f"# 1)标量类型指输入数据与返回结果均为标量\n"
            f"# 2)聚合函数的输入为向量，输出为标量\n"
            f"# 3)向量函数的输入输出均为向量\n"

            f"{'=' * 80}\n"
            f"# 使用标量函数，对数据集的各个数据进行计算\n"
            f" >>> df.apply(np.sqrt)\n"
            f"{df.apply(np.sqrt)}\n"

            f"{'=' * 80}\n"
            f"# 使用聚合函数，沿轴向汇总计算，结果变为Series\n"
            f"{'-' * 80}\n"
            f"# 从使用max情况来看，传递给函数的数据是一维的列或行\n"
            f" >>> df.apply(lambda x: max(x), raw=True)\n"
            f"{df.apply(lambda x: max(x), raw=True)}\n"

            f"{'=' * 80}\n"
            f"# 使用向量函数，返回结果为向量形式数据（如数组、列表、Series等）\n"
            f"{'-' * 80}\n"
            f"# 设置raw=True方式传递数据，结果长度需与轴向数据长度相同, 长度不相等时会触发异常\n"
            f" >>> df.apply(lambda x: [1, 2], raw=True)\n"
            # Left commented out on purpose: the call raises; the expected
            # error is shown as literal text on the next line.
            # f"{df.apply(lambda x: [1, 2], raw=True)}\n"
            "ValueError: Shape of passed values is (2, 4), indices imply (3, 4)\n"

            f"{'-' * 80}\n"
            f"# 返回结果为列表，长度等于数据长度，使用原索引\n"
            # NOTE(review): the echoed text below contains a full-width comma
            # (，) after "5"; the executed call on the following line uses a
            # normal ASCII comma. Display-only typo, left untouched here.
            f" >>> df.apply(lambda x: [2, 5，np.mean(x)])\n"
            f"{df.apply(lambda x: [2, 5, np.mean(x)])}\n"
            f"{'-。' * 40}\n"
            f"# 返回结果为列表，长度不等于数据长度，则会重建索引\n"
            f" >>> df.apply(lambda x: [2, np.mean(x)])\n"
            f"{df.apply(lambda x: [2, np.mean(x)])}\n"

            f"{'-' * 80}\n"
            f"# 如果返回结果为Series，使用该Series索引\n"
            f" >>> df.apply(lambda x: pd.Series([2, 3, np.mean(x)]))\n"
            f"{df.apply(lambda x: pd.Series([2, 3, np.mean(x)]))}\n"
            f" >>> df.apply(lambda x: pd.Series([2, 3, np.mean(x)], index=[3, 5, 7]))\n"
            f"{df.apply(lambda x: pd.Series([2, 3, np.mean(x)], index=[3, 5, 7]))}\n"

            f"{'=' * 80}\n"
            f"# 使用向量函数时，返回结果数组沿轴向传递给数据集的行（列）\n"

            f"{'-' * 80}\n"
            f"# 如果返回结果长度不等于数据长度，则会重建索引，否则使用原索引\n"
            f" >>> df.apply(lambda x: [2, np.mean(x)])\n"
            f"{df.apply(lambda x: [2, np.mean(x)])}\n"

            f"{'-' * 80}\n"
            f"# 使用result_type设置，选择以哪种方式使用结果数据\n"
            f"{'-.' * 40}\n"
            f"# 设置为'expand'方式，返回结果展开为列数据\n"
            f" >>> df.apply(lambda x: [2, np.mean(x), max(x)], axis=1, result_type='expand')\n"
            f"{df.apply(lambda x: [2, np.mean(x), max(x)], axis=1, result_type='expand')}\n"

            # NOTE(review): in the three sections below the echoed ">>>" text
            # and the executed call differ slightly in the list contents
            # ([2, 3, ...] vs [2, 5, ..., max(x)]); display-only, untouched.
            f"{'-.' * 40}\n"
            f"# 设置为'reduce'方式，返回结果不展开\n"
            f" >>> df.apply(lambda x: [2, 3, np.mean(x)], axis=1, result_type='reduce')\n"
            f"{df.apply(lambda x: [2, 5, np.mean(x), max(x)], axis=1, result_type='reduce')}\n"

            f"{'-.' * 40}\n"
            f"# 设置为'broadcast'方式，返回结果广播为列数据, 列索引保持原有标签值\n"
            f" >>> df.apply(lambda x: [2, 3, np.mean(x)], axis=1, result_type='broadcast')\n"
            f"{df.apply(lambda x: [2, 5, np.mean(x), max(x)], axis=1, result_type='broadcast')}\n"

            f"{'-.' * 40}\n"
            f"# 设置为None(缺省方式)，返回结果为数组类型时, 不展开为列数据\n"
            f" >>> df.apply(lambda x: [2, 3, np.mean(x), max(x)], axis=1, result_type=None)\n"
            f"{df.apply(lambda x: [2, 5, np.mean(x), max(x)], axis=1, result_type=None)}\n"

            f"{'-.' * 40}\n"
            f"# 设置为None(缺省方式)，返回结果为Series类型时, 展开为列数据\n"
            f" >>> df.apply(lambda x: pd.Series([2, 3, np.mean(x), max(x)]), axis=1, result_type=None)\n"
            f"{df.apply(lambda x: pd.Series([2, 5, np.mean(x), max(x)]), axis=1, result_type=None)}\n"

            f"{'-' * 80}\n"
            f"\n # 设置为广播方式，长度必须与轴向数据长度相同，否则会触发异常\n"
            f" >>> df.apply(lambda x: [2, 5], axis=1, result_type='broadcast')\n"
            # Left commented out on purpose: broadcasting a length-2 result
            # over 4 columns raises; expected error shown as text.
            # f"{df.apply(lambda x: [2, 5], axis=1, result_type='broadcast')}\n"
            "ValueError: cannot broadcast result\n"

            f"{'-' * 80}\n"
            f"# 缺省设置result_type=None下，按照结果数据形式进行转换\n"
            f"{'-.' * 40}\n"
            f"# 结果为列表时，不展开到各个列，直接以列表作为数据，方法返回结果为Series\n"
            f" >>> df.apply(lambda x: [2, 3, 4, 5], axis=1)\n"
            f"{df.apply(lambda x: [2, 3, 4, 5], axis=1)}\n"
            f"{'-.' * 40}\n"
            f"#  结果为Series时，展开为列数据, 并使用Series中给出的索引为列名\n"
            f" >>> df.apply(lambda x: pd.Series([2, 3, 4, 5]), axis=1)\n"
            f"{df.apply(lambda x: pd.Series([2, 3, 4, 5]), axis=1)}\n"

            f"{'=' * 80}\n"
            f"\n # 使用向量运算，输入Series数据与数组相减\n"
            f"# 在列向上计算\n"
            f" >>> df.apply(lambda x: x-[min(x)], axis=1)\n"
            f"{df.apply(lambda x: x - [min(x)] * 4, axis=1)}\n"

            f"# 在行向上计算\n"
            f"  >>> df.apply(lambda x: x-[min(x)], axis=0)\n"
            f"{df.apply(lambda x: x - [min(x)], axis=0)}\n"

            f"{'=' * 80}\n"
            f"# 使用行位置切片返回结果\n"
            f"{'-.' * 40}\n"
            f"# 行方向切片\n"
            f" >>> df.apply(lambda x: x[0:2], axis=0)\n"
            f"{df.apply(lambda x: x[0:2], axis=0)}\n"
            f"{'-.' * 40}\n"
            f"# 列方向切片\n"
            f" >>> df.apply(lambda x: x[0:2], axis=1)\n"
            f"{df.apply(lambda x: x[0:2], axis=1)}\n"

            f"{'=' * 80}\n"
            f"# 使用位置序列返回结果\n"
            f"{'-.' * 40}\n"
            f"# 在列方向上，使用位置序列（列序号）\n"
            f" >>> df.apply(lambda x: x[[0, 2, 3]], axis=1)\n"
            f"{df.apply(lambda x: x[[0, 2, 3]], axis=1)}\n"
            f"{'-.' * 40}\n"
            f"# 在行方向上，使用行索引标签序列\n"
            f"  >>> df.apply(lambda x: x[[1, 2]], axis=0)\n"
            f"{df.apply(lambda x: x[[1, 2]], axis=0)}\n"

            f"{'=' * 80}\n"
        )

        # Series.apply — reference (pandas) documentation.
        """
        apply(self, func, convert_dtype=True, args=(), **kwds)
            Invoke function on values of Series.

            Can be ufunc (a NumPy function that applies to the entire Series)
            or a Python function that only works on single values.

            Parameters
            ----------
            func : function
                Python function or NumPy ufunc to apply.
            convert_dtype : bool, default True
                Try to find better dtype for elementwise function results. If
                False, leave as dtype=object.
            args : tuple
                Positional arguments passed to func after the series value.
            **kwds
                Additional keyword arguments passed to func.

            Returns
            -------
            Series or DataFrame
                If func returns a Series object the result will be a DataFrame.

            See Also
            --------
            Series.map: For element-wise operations.
            Series.agg: Only perform aggregating type operations.
            Series.transform: Only perform transforming type operations.

            Examples
            --------
            Create a series with typical summer temperatures for each city.

            >>> s = pd.Series([20, 21, 12],
            ...               index=['London', 'New York', 'Helsinki'])
            >>> s
            London      20
            New York    21
            Helsinki    12
            dtype: int64

            Square the values by defining a function and passing it as an
            argument to ``apply()``.

            >>> def square(x):
            ...     return x ** 2
            >>> s.apply(square)
            London      400
            New York    441
            Helsinki    144
            dtype: int64

            Square the values by passing an anonymous function as an
            argument to ``apply()``.

            >>> s.apply(lambda x: x ** 2)
            London      400
            New York    441
            Helsinki    144
            dtype: int64

            Define a custom function that needs additional positional
            arguments and pass these additional arguments using the
            ``args`` keyword.

            >>> def subtract_custom_value(x, custom_value):
            ...     return x - custom_value

            >>> s.apply(subtract_custom_value, args=(5,))
            London      15
            New York    16
            Helsinki     7
            dtype: int64

            Define a custom function that takes keyword arguments
            and pass these arguments to ``apply``.

            >>> def add_custom_values(x, **kwargs):
            ...     for month in kwargs:
            ...         x += kwargs[month]
            ...     return x

            >>> s.apply(add_custom_values, june=30, july=20, august=25)
            London      95
            New York    96
            Helsinki    87
            dtype: int64

            Use a function from the Numpy library.

            >>> s.apply(np.log)
            London      2.995732
            New York    3.044522
            Helsinki    2.484907
            dtype: float64
        """
        sr = pd.Series([1, 3, 5], index=['i', 'j', 'k'], dtype=np.uint16)

        def fexp(a, b, c=2):
            # Toy function used to show args/kwargs passing via Series.apply.
            return a + b + c

        # NOTE(review): convert_dtype is deprecated in newer pandas releases;
        # kept here to match the documented signature above.
        print(
            f"{'=' * 80}\n"
            f"# Series apply method\n"
            f" >>> sr\n"
            f"{sr}\n"
            f"{'-' * 80}\n"
            f"# 调用转换函数进行数据转换, 结果数据类型推断为int64\n"
            f" >>> sr.apply(lambda x: x**2)\n"
            f"{sr.apply(lambda x: x ** 2)}\n"
            f"# 设置convert_dtype=False, 结果数据类型转换为object\n"
            f" >>> sr.apply(lambda x: x**2, convert_dtype=False)\n"
            f"{sr.apply(lambda x: x ** 2, convert_dtype=False)}\n"
            f"{'-' * 80}\n"
            f"# 使用向量计算方式，会触发异常\n"
            f" >>> sr.apply(lambda x: x + max(x))\n"
            # Left commented out on purpose: max() over a scalar raises; the
            # expected error is shown as literal text below.
            # f"{sr.apply(lambda x: x+max(x))}\n"
            f" ......\n"
            f"TypeError: 'int' object is not iterable\n"
            f"{'-' * 80}\n"
            # NOTE(review): the echoed line below is missing its closing ")";
            # display-only, left byte-identical in this doc-only pass.
            " >>> sr.apply(fexp, args=(20,), **{'c': 10}\n"
            f"{sr.apply(fexp, args=(20,), **{'c': 10})}\n"
        )
        return

    def data_tr_apply_series(self):
        """Demonstrate ``Series.apply`` with extra positional/keyword args.

        Prints the source Series, a simple element-wise transform, and two
        equivalent ways (lambda vs. named function) of forwarding ``args``
        and keyword arguments through ``apply``. Output goes to stdout;
        returns None.
        """
        s = pd.Series([20, 21, 12], index=['London', 'New York', 'Helsinki'])
        # Extra arguments after the element value: y comes from args, z by
        # keyword — both forwarded by apply to the callable.
        s1 = s.apply(lambda x, y, z=1: x ** 2 + y + z, args=(100,), z=200)

        def f(x, y, z=1):
            # Same computation as the lambda above, written as a function.
            return x ** 2 + y + z

        s2 = s.apply(f, args=(100,), z=200)

        print(
            " >>> s = pd.Series([20, 21, 12], index = ['London', 'New York', 'Helsinki'])\n"
            " >>> s\n"
            f"{s}\n"
            f" # 使用函数进行数据转换\n"
            f" >>> s.apply(lambda x: x**2)\n"
            # BUG FIX: previously this echoed the untransformed Series ``s``
            # instead of the result of the command shown above.
            f"{s.apply(lambda x: x ** 2)}\n"
            f" # 传递数据给转换函数参数\n"
            " >>> s.apply(lambda x, y, z=1: x**2+y+z, args=(100,), z=200)\n"
            f"{s1}\n"
            " # 或者写为函数形式：\n"
            " >>> def f(x, y, z=1):\n"
            " ...     return x**2+y+z\n"
            # BUG FIX: the echoed call was missing its closing parenthesis.
            " >>> s.apply(f, args=(100,), z=200)\n"
            f"{s2}"
        )


    def data_tr_assign(self):
        """Demonstrate ``DataFrame.assign``: adding derived columns.

        Shows assigning a new column from a list, a scalar, an expression on
        an existing column, a callable, and a callable that refers to a
        column created earlier in the same ``assign`` call. Prints to
        stdout; returns None.
        """
        # Reference (pandas) documentation for DataFrame.assign.
        """
        assign(self, **kwargs) -> 'DataFrame'
            Assign new columns to a DataFrame.

            Returns a new object with all original columns in addition to new ones.
            Existing columns that are re-assigned will be overwritten.

            Parameters
            ----------
            **kwargs : dict of {str: callable or Series}
                The column names are keywords. If the values are
                callable, they are computed on the DataFrame and
                assigned to the new columns. The callable must not
                change input DataFrame (though pandas doesn't check it).
                If the values are not callable, (e.g. a Series, scalar, or array),
                they are simply assigned.

            Returns
            -------
            DataFrame
                A new DataFrame with the new columns in addition to
                all the existing columns.

            Notes
            -----
            Assigning multiple columns within the same ``assign`` is possible.
            Later items in '**kwargs' may refer to newly created or modified
            columns in 'df'; items are computed and assigned into 'df' in order.

            Examples
            --------
            >>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
            ...                   index=['Portland', 'Berkeley'])
            >>> df
                      temp_c
            Portland    17.0
            Berkeley    25.0

            Where the value is a callable, evaluated on `df`:

            >>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
                      temp_c  temp_f
            Portland    17.0    62.6
            Berkeley    25.0    77.0

            Alternatively, the same behavior can be achieved by directly
            referencing an existing Series or sequence:

            >>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
                      temp_c  temp_f
            Portland    17.0    62.6
            Berkeley    25.0    77.0

            You can create multiple columns within the same assign where one
            of the columns depends on another one defined within the same assign:

            >>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
            ...           temp_k=lambda x: (x['temp_f'] +  459.67) * 5 / 9)
                      temp_c  temp_f  temp_k
            Portland    17.0    62.6  290.15
            Berkeley    25.0    77.0  298.15
        """

        df = pd.DataFrame(
            data={'A': [1, 2, 3]}
        )

        print(
            f" >>> df\n"
            f"{df}\n"

            f"{'-'*80}\n"
            "# 使用关键字，指定建立一个新的数据列\n"
            " >>> df.assign(B=[2, 4, 6])\n"
            f"{df.assign(B=[2, 4, 6])}\n"
            " >>> df.assign(B=100)\n"
            f"{df.assign(B=100)}\n"

            f"{'-'*80}\n"
            "# 可以同时生成多个数据列\n"
            " >>> df.assign(B=[2, 4, 6], C=df.A + 100)\n"
            f"{df.assign(B=[2, 4, 6], C=df.A + 100)}\n"

            f"{'-'*80}\n"
            "# 使用函数计算新的数据列\n"
            " >>> df.assign(D=lambda x: x['A']*3)\n"
            f"{df.assign(D=lambda x: x['A'] * 3)}\n"

            f"{'-'*80}\n"
            "# 使用函数计算时，可以使用前面参数计算的列数据\n"
            # BUG FIX: the echoed command now matches the call actually run
            # (it previously omitted the B= assignment the demo depends on).
            " >>> df.assign(B=df.A*2, D=lambda x: x['A']+x['B'])\n"
            f"{df.assign(B=df.A*2, D=lambda x: x['A']+x['B'])}\n"
        )

        return

    def data_tr_map(self):
        """Demonstrate ``Series.map`` with a function, a dict and a Series.

        Shows plain function mapping, ``na_action='ignore'``, dict-based
        substitution (including a NaN key), and mapping through another
        Series. Prints to stdout; returns None.

        Reference (pandas) signature::

        map(self, arg, na_action=None) -> 'Series'
            Map values of Series according to input correspondence.

            Used for substituting each value in a Series with another value,
            that may be derived from a function, a ``dict`` or
            a :class:`Series`.

            Parameters
            ----------
            arg : function, collections.abc.Mapping subclass or Series
                Mapping correspondence.
            na_action : {None, 'ignore'}, default None
                If 'ignore', propagate NaN values, without passing them to the
                mapping correspondence.

            Returns
            -------
            Series
                Same index as caller.

            See Also
            --------
            Series.apply : For applying more complex functions on a Series.
            DataFrame.apply : Apply a function row-/column-wise.
            DataFrame.applymap : Apply a function elementwise on a whole DataFrame.

            Notes
            -----
            When ``arg`` is a dictionary, values in Series that are not in the
            dictionary (as keys) are converted to ``NaN``. However, if the
            dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e.
            provides a method for default values), then this default is used
            rather than ``NaN``.

            Examples
            --------
            >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit'])
            >>> s
            0      cat
            1      dog
            2      NaN
            3   rabbit
            dtype: object

            ``map`` accepts a ``dict`` or a ``Series``. Values that are not found
            in the ``dict`` are converted to ``NaN``, unless the dict has a default
            value (e.g. ``defaultdict``):

            >>> s.map({'cat': 'kitten', 'dog': 'puppy'})
            0   kitten
            1    puppy
            2      NaN
            3      NaN
            dtype: object

            It also accepts a function:

            >>> s.map('I am a {}'.format)
            0       I am a cat
            1       I am a dog
            2       I am a nan
            3    I am a rabbit
            dtype: object

            To avoid applying the function to missing values (and keep them as
            ``NaN``) ``na_action='ignore'`` can be used:

            >>> s.map('I am a {}'.format, na_action='ignore')
            0     I am a cat
            1     I am a dog
            2            NaN
            3  I am a rabbit
            dtype: object
        """
        sr1 = pd.Series([66, 85, 50, 70, 30], dtype=np.uint8)
        # BUG FIX: np.nan instead of np.NaN throughout — the np.NaN alias was
        # removed in NumPy 2.0 (both spell the same float('nan') object).
        sr2 = pd.Series([66, 85, 50, np.nan, 30])
        print(
            f" >>> sr1\n"
            f"{sr1}\n"
            f"# 使用转换函数\n"
            # BUG FIX: echoed commands now name the actual variables
            # (sr1/sr2); they previously referenced a nonexistent ``sr``.
            f" >>> sr1.map(lambda x: 'pass' if x > 65 else 'fail')\n"
            f"{sr1.map(lambda x: 'pass' if x > 65 else 'fail')}\n"

            f"{'-'*80}\n"
            f"# 忽略NaN值转换\n"
            f" >>> sr2\n"
            f"{sr2}\n"
            f" >>> sr2.map(lambda x: 'pass' if x > 65 else 'fail', na_action='ignore')\n"
            f"{sr2.map(lambda x: 'pass' if x > 65 else 'fail', na_action='ignore')}\n"

            f"{'-'*80}\n"
            f"# 使用字典进行映射转换\n"
            " >>> sr2.map({85: 'excel', 66: 'pass', 50: 'likely', 30: 'fail', np.nan: 'miss'})\n"
            f"{sr2.map({85: 'excel', 66: 'pass', 50: 'likely', 30: 'fail', np.nan: 'miss'})}\n"

            f"{'-'*80}\n"
            f"# 结果类型为字符串时，不接受NaN值\n"
            " >>> sr2.map({85: 'excel', 66: 'pass', 50: 'likely', 30: 'fail'}, na_action='ignore')\n"
            # NOTE(review): the original note claims this raises; left
            # commented out and the claimed error shown as text — confirm
            # against the pandas version in use.
            # f"{sr2.map({85: 'excel', 66: 'pass', 50: 'likely', 30: 'fail'}, na_action='ignore')}"
            f"ValueError: Invalid format specifier\n"

            f"{'-'*80}\n"
            f"# 使用Series进行映射转换\n"
            f" >>> sr3\n"
            f"{pd.Series({85: 'excel', 66: 'pass', 50: 'likely', 30: 'fail', np.nan: 'miss'})}\n"
            f" >>> sr2.map(sr3)\n"
            f"{sr2.map(pd.Series({85: 'excel', 66: 'pass', 50: 'likely', 30: 'fail', np.nan: 'miss'}))}\n"
        )
        return

    def data_tr_replace(self, mode=1):
        """Demonstrate ``DataFrame.replace``.

        Shows the scalar, list, dict / nested-dict and regular-expression
        forms of ``to_replace``/``value``, plus the fill-method fallback
        (``method='pad'/'ffill'/'bfill'``) used when ``value=None``.
        See the pandas ``DataFrame.replace`` documentation for the full
        parameter reference.

        NOTE(review): ``replace(..., method=...)`` is deprecated in recent
        pandas versions (>= 2.1) — these examples target the pandas version
        this demo was written against.

        Parameters
        ----------
        mode : int, default 1
            Demo selector; the examples run only when ``mode == 1``.

        Returns
        -------
        None
        """
        if mode == 1:
            import re  # local import: only the regex examples need it
            df = pd.DataFrame({'name': ['Wang', 'Han', 'Jack', 'Jim'], 'score': [60, 90, 60, 50]})
            dict1 = {'Jim': 'Lee', 'Wang': 'Maa'}
            dict2 = {90: 99}
            # nested dict: restricts each mapping to a specific column
            dict3 = {'name': dict1, 'score': dict2}
            # pre-compiled pattern: matches strings whose 2nd char is 'a'
            regex_exp = re.compile(r'^.[a].*')
            print(
                " >>> df\n"
                f"{df}\n"
                f"{'='*80}\n"
                f"# 使用标量作为to_replace参数\n"
                f"{'-'*80}\n"
                f" >>> df.replace(60, 80)\n"
                f"{df.replace(60, 80)}\n"
                f"{'-'*80}\n"
                f" >>> df.replace('Han', 'Xie')\n"
                f"{df.replace('Han', 'Xie')}\n"

                f"{'-'*80}\n"
                f"# value=None，使用填充方法进行替换\n"
                f"{'-.'*40}\n"
                f" >>> df.replace(60, None, method='pad')   # 使用向前取值方式替换\n"
                f"{df.replace(60, None, method='pad')}\n"
                f"{'-.'*40}\n"
                f" >>> df.replace(60, None, method='ffill')   # 使用向前取值方式替换\n"
                f"{df.replace(60, None, method='ffill')}\n"
                f"{'-.'*40}\n"
                # fixed: closing paren of the demo command was inside the comment
                f" >>> df.replace(60, None, method='bfill')   # 使用向后取值方式替换\n"
                f"{df.replace(60, None, method='bfill')}\n"

                f"{'='*80}\n"
                f"# 使用列表作为to_replace参数变量\n"
                # fixed: displayed command now matches the executed call (90, not 100)
                f" >>> df.replace(['Jim', 'Wang', 90], ['Foo', 'Loo', 100])\n"
                f"{df.replace(['Jim', 'Wang', 90], ['Foo', 'Loo', 100])}\n"

                f"{'='*80}\n"
                f"# 使用字典作为to_replace参数变量\n"
                f"# 使用不限定范围的字典方式\n"
                " >>> df.replace({'Jim': 'Michel', 90: 120})\n"
                f"{df.replace({'Jim': 'Michel', 90: 120})}\n"
                f"# 使用嵌套字典方式, 限定列进行匹配\n"
                # fixed: curly quote ’ replaced with a plain ASCII quote
                " >>> df.replace({'name': {'Jim': 'Lee', 'Wang': 'Maa'}, 'score': {90: 99}})\n"
                f"{df.replace(dict3)}\n"

                f"{'='*80}\n"
                f"# 使用正则表达式方式\n"
                f"{'-'*80}\n"
                f"# 未设置regex=True，to_replace赋值视为普通字符串\n"
                f" >>> df.replace(to_replace=r'^.[a].*', value='Frank')\n"
                f"{df.replace(to_replace=r'^.[a].*', value='Frank')}\n"
                f"{'-.'*40}\n"
                f"# 设置regex=True，to_replace赋值视为正则表达式\n"
                # fixed: full-width comma ， replaced with an ASCII comma
                f" >>> df.replace(to_replace=r'^.[a].*', value='Frank', regex=True)\n"
                f"{df.replace(to_replace=r'^.[a].*', value='Frank', regex=True)}\n"
                f"{'-.'*40}\n"
                f"# 使用编译的模式传递给to_replace，可以被识别为正则表达式\n"
                f" >>> df.replace(to_replace=re.compile(r'^.[a].*'), value='Frank')\n"
                # use the pre-compiled pattern (it was previously built but unused)
                f"{df.replace(to_replace=regex_exp, value='Frank')}\n"

                f"{'-'*80}\n"
                f"# 单独使用正则表达式方式\n"
                f"{'-.'*40}\n"
                f"# 使用regex匹配以J开始的字符串，设置value='Mark'，进行替换\n"
                f" >>> df.replace(regex='^J.*', value='Mark')\n"
                f"{df.replace(regex='^J.*', value='Mark')}\n"
                f"{'-.'*40}\n"
                f"# 使用regex匹配以J开始的字符串及'Han'，设置value='Liu'，进行替换\n"
                f" >>> df.replace(regex=['^J.*', 'Han'], value='Liu')\n"
                f"{df.replace(regex=['^J.*', 'Han'], value='Liu')}\n"
                f"{'-.'*40}\n"
                f"# 使用字典方式设置regex，匹配包含字符n的字符串，替换为Jin\n"
                # fixed: displayed command ended with '}}' instead of '})'
                " >>> df.replace(regex={'.*[n].*': 'Jin'})\n"
                # dict literal instead of dict([(k, v)]) — same mapping, clearer
                f"{df.replace(regex={'.*[n].*': 'Jin'})}\n"

                f"{'=' * 80}"
            )

        return

    def data_tr_transform(self):
        """
        transform(self, func: 'AggFuncType', axis: 'Axis' = 0, *args, **kwargs) -> 'DataFrame'

            Call ``func`` on self producing a DataFrame with transformed values.

            Produced DataFrame will have same axis length as self.

            Parameters
            ----------
            func : function, str, list-like or dict-like
                Function to use for transforming the data. If a function, must either
                work when passed a DataFrame or when passed to DataFrame.apply. If func
                is both list-like and dict-like, dict-like behavior takes precedence.
                Accepted combinations are:
                - function
                - string function name
                - list-like of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
                - dict-like of axis labels -> functions, function names or list-like of such.
            axis : {0 or 'index', 1 or 'columns'}, default 0
                    If 0 or 'index': apply function to each column.
                    If 1 or 'columns': apply function to each row.
            *args
                Positional arguments to pass to `func`.
            **kwargs
                Keyword arguments to pass to `func`.

            Returns
            -------
            DataFrame
                A DataFrame that must have the same length as self.

            Raises
            ------
            ValueError : If the returned DataFrame has a different length than self.

            See Also
            --------
            DataFrame.agg : Only perform aggregating type operations.
            DataFrame.apply : Invoke function on a DataFrame.

            Examples
            --------
            >>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
            >>> df
               A  B
            0  0  1
            1  1  2
            2  2  3
            >>> df.transform(lambda x: x + 1)
               A  B
            0  1  2
            1  2  3
            2  3  4

            Even though the resulting DataFrame must have the same length as the
            input DataFrame, it is possible to provide several input functions:

            >>> s = pd.Series(range(3))
            >>> s
            0    0
            1    1
            2    2
            dtype: int64
            >>> s.transform([np.sqrt, np.exp])
                   sqrt        exp
            0  0.000000   1.000000
            1  1.000000   2.718282
            2  1.414214   7.389056

            You can call transform on a GroupBy object:

            >>> df = pd.DataFrame({
            ...     "Date": [
            ...         "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05",
            ...         "2015-05-08", "2015-05-07", "2015-05-06", "2015-05-05"],
            ...     "Data": [5, 8, 6, 1, 50, 100, 60, 120],
            ... })
            >>> df
                     Date  Data
            0  2015-05-08     5
            1  2015-05-07     8
            2  2015-05-06     6
            3  2015-05-05     1
            4  2015-05-08    50
            5  2015-05-07   100
            6  2015-05-06    60
            7  2015-05-05   120
            >>> df.groupby('Date')['Data'].transform('sum')
            0     55
            1    108
            2     66
            3    121
            4     55
            5    108
            6     66
            7    121
            Name: Data, dtype: int64

            >>> df = pd.DataFrame({
            ...     "c": [1, 1, 1, 2, 2, 2, 2],
            ...     "type": ["m", "n", "o", "m", "m", "n", "n"]
            ... })
            >>> df
               c type
            0  1    m
            1  1    n
            2  1    o
            3  2    m
            4  2    m
            5  2    n
            6  2    n
            >>> df['size'] = df.groupby('c')['type'].transform(len)
            >>> df
               c type size
            0  1    m    3
            1  1    n    3
            2  1    o    3
            3  2    m    4
            4  2    m    4
            5  2    n    4
            6  2    n    4
        """

        df = self.df
        df = df.iloc[:5]
        df.index = range(5)
        df.iloc[0, 0] = 0
        df.iat[3, 1] = 0

        def c12(x, c=1.2):
            return x*c

        def myfun(x, score=63):
            return True if x >= score else False

        print(
            f"{df.info()}\n"
            f" >>> df\n"
            f"{df}\n"
            
            f"# 普通运算函数作用于列数据处理，而DataFrame作为输入参数时要使用向量函数\n"
            f"# 如果使用普通逻辑判断等语句，需使用[]限定函数用于DataFrame.apply的输入函数\n"
            f"# 如果使用复杂判断，可以使用Numpy的相关向量函数\n"

            f"# 不放在[]中会触发异常：ValueError: Transform function failed\n"
            f" >>> df.transform([lambda x: 100 if x > 63 else 60])\n"
            f"{df.transform([lambda x: 100 if x > 63 else 60])}\n"
            
            "# 也可以放在{}中针对某些列进行转换\n"
            " >>> df.transform({'math': lambda x: 100 if x >= 63 else 60})\n"
            f"{df.transform({'math': lambda x: 100 if x >= 63 else 60})}\n"

            f"# 使用Numpy.where不会触发异常\n"
            f" >>> df.transform(lambda x: np.where(x<63, 0, 1))\n"
            f"{df.transform(lambda x: np.where(x<63, 0, 1))}\n"

            f" >>> def c12(x, c=1.2):\n"
            f"         return x*c\n"
            f"\n"
            f" >>> def myfun(x, score=63):\n"
            f"         return True if x >= score else False\n"
            f"\n"
            f" >>> df.transform(func=[np.int, myfun, c12])\n"
            f"{df.transform(func=[np.int, myfun, c12])}\n"
            f"\n"
            f"# 设定轴向axis=1后，会在列方向进行计算\n"
            f" >>> df.transform(func=[np.int, myfun], axis=1)\n"
            f"{df.transform(func=[np.int, myfun], axis=1)}"
        )
        return df

    def data_tr_agg(self, mode=1):
        """Demonstrate ``DataFrame.agg`` (alias of ``aggregate``).

        Shows list-, dict- and mixed-function aggregation along both axes,
        and what happens when non-aggregating functions are supplied.  See
        the pandas ``DataFrame.aggregate`` documentation for the full
        parameter reference.

        Parameters
        ----------
        mode : int, default 1
            Demo selector (kept consistent with ``data_tr_replace``); the
            examples run only when ``mode == 1``.

        Returns
        -------
        None
        """
        if mode != 1:
            return

        df = pd.DataFrame(
            {
             # 'id': range(5),
             'id': ['{:02d}'.format(i+1) for i in range(5)],
             'name': ['Zhang', 'Wang', 'Li', 'Zhao', 'Sun'],
             'km1': [80, 90, 70, 65, 82],
             'km2': [90, 85, 56, 88, 76]
             }
        )
        df = df.set_index(keys=df.id).drop(labels=['id'], axis=1)

        def maxlen(sr):
            # longest string length within the Series
            return np.max(sr.apply(lambda x: len(x)))

        print(
            f"{'-'*80}\n"
            " >>> df\n"
            f"{df}\n"
            f"{df.info()}\n"

            f"{'-'*80}\n"
            f"# 使用聚合函数，将根据推断形成计算结果\n"
            f" >>> df.agg([max, np.mean])\n"
            f"{df.agg([max, np.mean])}\n"

            f"{'-'*80}\n"
            f"# 沿列向使用聚合函数进行计算\n"
            f" >>> df[['km1', 'km2']].agg([np.sum, np.mean], axis=1)\n"
            f"{df[['km1', 'km2']].agg([np.sum, np.mean], axis=1)}\n"

            f"{'-'*80}\n"
            f"# 不能混合使用聚合函数和变换函数\n"
            f" >>> df.agg([np.size, np.mean])\n"
            # f"{df.agg([np.size, np.mean])}\n"
            f"ValueError: cannot combine transform and aggregation operations\n"
            # fixed: newline added so the next separator starts on its own line
            f"（数值错误：不能组合变换计算和聚合计算操作，np.size函数不属于聚合数据计算!）\n"

            f"{'-'*80}\n"
            f"# 使用非聚合函数，存在不能计算的原数据列，会触发异常TypeError\n"
            " >>> df.agg(np.sqrt)\n"
            "TypeError: loop of ufunc does not support argument 0 of type str which has no callable sqrt method\n"
            f"{'-.'*40}\n"
            "# 使用多个非聚合函数，只要有一个函数有效，则不会触发异常\n"
            # np.alen was removed from NumPy; builtin len aggregates identically
            " >>> df.agg([len, np.sqrt])\n"
            f"{df.agg([len, np.sqrt])}\n"

            f"{'-'*80}\n"
            f"# 使用非聚合函数，返回原数据计算结果\n"
            # fixed: displayed command now matches the executed call
            " >>> df.agg({'name': str.upper, 'km1': np.square, 'km2': np.log10})\n"
            f"{df.agg({'name': str.upper, 'km1': np.square, 'km2': np.log10})}\n"

            f"{'-'*80}\n"
            f"# 使用多种类型的聚合函数，不能进行计算的数据类型返回NaN值结果\n"
            # fixed: comma was missing after maxlen in the displayed command
            " >>> df.agg({'name': maxlen,\n"
            "             'km1': [max, np.mean, np.median], \n"
            "             'km2': [max, np.mean, np.median]})\n"
            f"{df.agg({'name': maxlen, 'km1': [max, np.mean, np.median], 'km2': [max, np.mean, np.median]})}\n"
        )

        return

    def data_tr_fillna(self):
        """
        fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None)
            -> 'Optional[DataFrame]'

            Fill NA/NaN values using the specified method.

            Parameters
            ----------
            value : scalar, dict, Series, or DataFrame
                Value to use to fill holes (e.g. 0), alternately a
                dict/Series/DataFrame of values specifying which value to use for
                each index (for a Series) or column (for a DataFrame).  Values not
                in the dict/Series/DataFrame will not be filled. This value cannot
                be a list.
            method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
                Method to use for filling holes in reindexed Series
                pad / ffill: propagate last valid observation forward to next valid
                backfill / bfill: use next valid observation to fill gap.
            axis : {0 or 'index', 1 or 'columns'}
                Axis along which to fill missing values.
            inplace : bool, default False
                If True, fill in-place. Note: this will modify any
                other views on this object (e.g., a no-copy slice for a column in a
                DataFrame).
            limit : int, default None
                If method is specified, this is the maximum number of consecutive
                NaN values to forward/backward fill. In other words, if there is
                a gap with more than this number of consecutive NaNs, it will only
                be partially filled. If method is not specified, this is the
                maximum number of entries along the entire axis where NaNs will be
                filled. Must be greater than 0 if not None.
            downcast : dict, default is None
                A dict of item->dtype of what to downcast if possible,
                or the string 'infer' which will try to downcast to an appropriate
                equal type (e.g. float64 to int64 if possible).

            Returns
            -------
            DataFrame or None
                Object with missing values filled or None if ``inplace=True``.

            See Also
            --------
            interpolate : Fill NaN values using interpolation.
            reindex : Conform object to new index.
            asfreq : Convert TimeSeries to specified frequency.

            Examples
            --------
            >>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
            ...                    [3, 4, np.nan, 1],
            ...                    [np.nan, np.nan, np.nan, 5],
            ...                    [np.nan, 3, np.nan, 4]],
            ...                   columns=list('ABCD'))
            >>> df
                 A    B   C  D
            0  NaN  2.0 NaN  0
            1  3.0  4.0 NaN  1
            2  NaN  NaN NaN  5
            3  NaN  3.0 NaN  4

            Replace all NaN elements with 0s.

            >>> df.fillna(0)
                A   B   C   D
            0   0.0 2.0 0.0 0
            1   3.0 4.0 0.0 1
            2   0.0 0.0 0.0 5
            3   0.0 3.0 0.0 4

            We can also propagate non-null values forward or backward.

            >>> df.fillna(method='ffill')
                A   B   C   D
            0   NaN 2.0 NaN 0
            1   3.0 4.0 NaN 1
            2   3.0 4.0 NaN 5
            3   3.0 3.0 NaN 4

            Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
            2, and 3 respectively.

            >>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
            >>> df.fillna(value=values)
                A   B   C   D
            0   0.0 2.0 2.0 0
            1   3.0 4.0 2.0 1
            2   0.0 1.0 2.0 5
            3   0.0 3.0 2.0 4

            Only replace the first NaN element.

            >>> df.fillna(value=values, limit=1)
                A   B   C   D
            0   0.0 2.0 2.0 0
            1   3.0 4.0 NaN 1
            2   NaN 1.0 NaN 5
            3   NaN 3.0 NaN 4
        """
        df = self.df.copy()
        df = df.iloc[0:6, :]
        df.math = range(50, 56)
        df.science = range(60, 66)
        df.index = range(6)
        df.iloc[[0, 2, 3, 5], 0] = np.NaN
        df.iloc[2:5, 1] = np.NaN
        # df.iloc[1, 0:1] = np.NaN

        print(
            f" >>> df\n"
            f"{df}\n"

            f"# 将NaN值替换为100\n"
            f" >>> df.fillna(100)\n"
            f"{df.fillna(100)}\n"

            f"# 限制替换的数量为2\n"
            f" >>> df.fillna(100, limit=2)\n"
            f"{df.fillna(100, limit=2)}\n"

            f"# 使用字典按列分别设置填充值\n"
            " >>> {df.fillna({'math': 150, 'science': 200})\n"
            f"{df.fillna({'math': 100, 'science': 200})}\n"

            f"# 限定填充次数2和方式ffill\n"
            f" >>> df.fillna(limit=2, method='ffill')\n"
            f"{df.fillna(limit=2, method='ffill')}\n"

            f"# 限定填充次数2和方式bfill\n"
            f" >>> df.fillna(limit=2, method='bfill')\n"
            f"{df.fillna(limit=2, method='bfill')}\n"

            f"# 目前仅支持沿轴向axis=0按列进行替换，同时，downcast仅支持使用'infer'\n"
            f" >>> df.fillna(120, limit=2, axis='columns', downcast='infer')\n"
            f"{df.fillna(150, limit=2, axis='columns', downcast='infer')}\n"
        )

        # fillna for Series
        """
        fillna(self, value=None, method=None, axis=None, inplace=False, limit=None, downcast=None)
            -> Union[ForwardRef('Series'), NoneType]
         
            Fill NA/NaN values using the specified method.
        
            Parameters
            ----------
            value : scalar, dict, Series, or DataFrame
                Value to use to fill holes (e.g. 0), alternately a
                dict/Series/DataFrame of values specifying which value to use for
                each index (for a Series) or column (for a DataFrame).  Values not
                in the dict/Series/DataFrame will not be filled. This value cannot
                be a list.
            method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
                Method to use for filling holes in reindexed Series
                pad / ffill: propagate last valid observation forward to next valid
                backfill / bfill: use next valid observation to fill gap.
            axis : {0 or 'index'}
                Axis along which to fill missing values.
            inplace : bool, default False
                If True, fill in-place. Note: this will modify any
                other views on this object (e.g., a no-copy slice for a column in a
                DataFrame).
            limit : int, default None
                If method is specified, this is the maximum number of consecutive
                NaN values to forward/backward fill. In other words, if there is
                a gap with more than this number of consecutive NaNs, it will only
                be partially filled. If method is not specified, this is the
                maximum number of entries along the entire axis where NaNs will be
                filled. Must be greater than 0 if not None.
            downcast : dict, default is None
                A dict of item->dtype of what to downcast if possible,
                or the string 'infer' which will try to downcast to an appropriate
                equal type (e.g. float64 to int64 if possible).
        
            Returns
            -------
            Series or None
        """

        return df

    def data_un_merge(self):
        # only for DataFrame
        """
        merge(left, right, how: str = 'inner', on=None, left_on=None, right_on=None, left_index: bool = False,
              right_index: bool = False, sort: bool = False, suffixes=('_x', '_y'), copy: bool = True,
              indicator: bool = False, validate=None)

               -> 'DataFrame'

            Merge DataFrame or named Series objects with a database-style join.

            The join is done on columns or indexes. If joining columns on
            columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
            on indexes or indexes on a column or columns, the index will be passed on.

            Parameters
            ----------
            left : DataFrame
            right : DataFrame or named Series
                Object to merge with.
            how : {'left', 'right', 'outer', 'inner'}, default 'inner'
                Type of merge to be performed.

                * left: use only keys from left frame, similar to a SQL left outer join;
                  preserve key order.
                * right: use only keys from right frame, similar to a SQL right outer join;
                  preserve key order.
                * outer: use union of keys from both frames, similar to a SQL full outer
                  join; sort keys lexicographically.
                * inner: use intersection of keys from both frames, similar to a SQL inner
                  join; preserve the order of the left keys.
            on : label or list
                Column or index level names to join on. These must be found in both
                DataFrames. If `on` is None and not merging on indexes then this defaults
                to the intersection of the columns in both DataFrames.
            left_on : label or list, or array-like
                Column or index level names to join on in the left DataFrame. Can also
                be an array or list of arrays of the length of the left DataFrame.
                These arrays are treated as if they are columns.
            right_on : label or list, or array-like
                Column or index level names to join on in the right DataFrame. Can also
                be an array or list of arrays of the length of the right DataFrame.
                These arrays are treated as if they are columns.
            left_index : bool, default False
                Use the index from the left DataFrame as the join key(s). If it is a
                MultiIndex, the number of keys in the other DataFrame (either the index
                or a number of columns) must match the number of levels.
            right_index : bool, default False
                Use the index from the right DataFrame as the join key. Same caveats as
                left_index.
            sort : bool, default False
                Sort the join keys lexicographically in the result DataFrame. If False,
                the order of the join keys depends on the join type (how keyword).
            suffixes : list-like, default is ("_x", "_y")
                A length-2 sequence where each element is optionally a string
                indicating the suffix to add to overlapping column names in
                `left` and `right` respectively. Pass a value of `None` instead
                of a string to indicate that the column name from `left` or
                `right` should be left as-is, with no suffix. At least one of the
                values must not be None.
            copy : bool, default True
                If False, avoid copy if possible.
            indicator : bool or str, default False
                If True, adds a column to the output DataFrame called "_merge" with
                information on the source of each row. The column can be given a different
                name by providing a string argument. The column will have a Categorical
                type with the value of "left_only" for observations whose merge key only
                appears in the left DataFrame, "right_only" for observations
                whose merge key only appears in the right DataFrame, and "both"
                if the observation's merge key is found in both DataFrames.

            validate : str, optional
                If specified, checks if merge is of specified type.

                * "one_to_one" or "1:1": check if merge keys are unique in both
                  left and right datasets.
                * "one_to_many" or "1:m": check if merge keys are unique in left
                  dataset.
                * "many_to_one" or "m:1": check if merge keys are unique in right
                  dataset.
                * "many_to_many" or "m:m": allowed, but does not result in checks.

            Returns
            -------
            DataFrame
                A DataFrame of the two merged objects.

            See Also
            --------
            merge_ordered : Merge with optional filling/interpolation.
            merge_asof : Merge on nearest keys.
            DataFrame.join : Similar method using indices.

            Notes
            -----
            Support for specifying index levels as the `on`, `left_on`, and
            `right_on` parameters was added in version 0.23.0
            Support for merging named Series objects was added in version 0.24.0

            Examples
            --------
            >>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
            ...                     'value': [1, 2, 3, 5]})
            >>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
            ...                     'value': [5, 6, 7, 8]})
            >>> df1
                lkey value
            0   foo      1
            1   bar      2
            2   baz      3
            3   foo      5
            >>> df2
                rkey value
            0   foo      5
            1   bar      6
            2   baz      7
            3   foo      8

            Merge df1 and df2 on the lkey and rkey columns. The value columns have
            the default suffixes, _x and _y, appended.
            >>> df1.merge(df2, left_on='lkey', right_on='rkey')
              lkey  value_x rkey  value_y
            0  foo        1  foo        5
            1  foo        1  foo        8
            2  foo        5  foo        5
            3  foo        5  foo        8
            4  bar        2  bar        6
            5  baz        3  baz        7

            Merge DataFrames df1 and df2 with specified left and right suffixes
            appended to any overlapping columns.

            >>> df1.merge(df2, left_on='lkey', right_on='rkey',
            ...           suffixes=('_left', '_right'))
              lkey  value_left rkey  value_right
            0  foo           1  foo            5
            1  foo           1  foo            8
            2  foo           5  foo            5
            3  foo           5  foo            8
            4  bar           2  bar            6
            5  baz           3  baz            7

            Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
            any overlapping columns.

            >>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
            Traceback (most recent call last):
            ...
            ValueError: columns overlap but no suffix specified:
                Index(['value'], dtype='object')
        """
        # Single-level-index sample frames; columns 'a'/'b'/'c' overlap so
        # suffixes come into play when merging on a single key.
        df1 = pd.DataFrame({'a': range(3), 'b': range(2, 5), 'c': ['orange', 'apple', 'peach']},
                           index=[1, 2, 3])
        df2 = pd.DataFrame({'a': range(2, 5), 'b': [3, 4, 5], 'c': ['peach', 'apple', 'plum']},
                           index=[1, 3, 5])
        # Multi-level-index sample frames; index levels are named so merges can
        # join on the level names via left_on/right_on.
        df3 = pd.DataFrame({'a': range(3, 6), 'b': [30, 40, 50], 'f': ['apple', 'orange', 'peach']},
                           index=pd.MultiIndex.from_product([[3, 4, 5], ['u']]))
        df3.index.names = ['index-1', 'index-2']
        df4 = pd.DataFrame({'a': range(4, 7), 'b': [40, 50, 60]},
                           index=pd.MultiIndex.from_product([[4, 5, 6], ['u']]))
        df4.index.names = ['index-1', 'index-2']

        # Three-level index; its level names deliberately do NOT all match df3's,
        # to demonstrate the KeyError case at the end.
        df5 = pd.DataFrame({'a': range(5, 8), 'b': [50, 60, 70]},
                           index=pd.MultiIndex.from_tuples([(5, 'u', 'u'), (6, 'u', 'u'), (7, 'u', 'v')]))
        df5.index.names = ['index-1', 'index-2', 'index-3']

        print(
            f" >>> df1\n"
            f"{df1}\n"
            f" >>> df2\n"
            f"{df2}\n"

            f"{'-'*80}\n"
            f"# 按照某列为键进行连接， 默认连接方式how='inner'\n"
            f" >>> pd.merge(df1, df2, on='a')\n"
            f"{pd.merge(df1, df2, on='a')}\n"

            f"{'-'*80}\n"
            f"# 按照某列为键进行连接， 设置连接方式how='outer'， 设置indicator标识数据来源\n"
            f" >>> pd.merge(df1, df2, on='a', how='outer')\n"
            f"{pd.merge(df1, df2, on='a', how='outer', indicator=True)}\n"

            f"{'-'*80}\n"
            f"# 按照某列为键进行连接， 设置连接方式how='left'\n"
            f" >>> pd.merge(df1, df2, on='a', how='left')\n"
            f"{pd.merge(df1, df2, on='a', how='left', indicator=True)}\n"

            f"{'-'*80}\n"
            f"# 按照某列为键进行连接， 设置连接方式how='right'\n"
            f" >>> pd.merge(df1, df2, on='a', how='right')\n"
            f"{pd.merge(df1, df2, on='a', how='right', indicator=True)}\n"

            f"{'-'*80}\n"
            f"# 按照索引为键进行连接\n"
            f" >>> pd.merge(df1, df2, left_index=True, right_index=True)\n"
            f"{pd.merge(df1, df2, left_index=True, right_index=True)}\n"

            # Fixed: the displayed command previously said right_index='a',
            # which is not the call actually executed below (right_on='a').
            f"{'-'*80}\n"
            f"# 按照列、索引混合方式进行连接\n"
            f" >>> pd.merge(df1, df2, left_index=True, right_on='a')\n"
            f"{pd.merge(df1, df2, left_index=True, right_on='a')}\n"

            f"{'-' * 80}\n"
            f"# 使用外连接方式时，可以设置sort按照合并键对结果数据排序\n"
            f"# -- 未排序情况：\n"
            f" >>> pd.merge(df1, df2, on='c', how='outer')\n"
            f"{pd.merge(df1, df2, on='c', how='outer')}\n"
            f"# -- 已排序情况：\n"
            f" >>> pd.merge(df1, df2, on='c', how='outer', sort=True)\n"
            f"{pd.merge(df1, df2, on='c', how='outer', sort=True)}\n"

            f"{'-' * 80}\n"
            f"# 多层次索引数据集的merge连接\n"
            # Fixed: the first label previously said df4 while printing df3.
            f" >>> df3\n"
            f"{df3}\n"
            f" >>> df4\n"
            f"{df4}\n"
            f" >>> df5\n"
            f"{df5}\n"

            f"{'-' * 80}\n"
            f"# 按照层次进行连接, 连接使用的多层索引层次名需要相同\n"
            # Fixed: the displayed command previously said 'ind-1', while the
            # executed call joins on the actual level name 'index-1'.
            f" >>> df3.merge(df4, left_on='index-1', right_on='index-1')\n"
            f"{df3.merge(df4, left_on='index-1', right_on='index-1')}\n"
            f"{'-.' * 40}\n"
            "# 按照层次进行连接, 使用的索引层次名不同，会触发异常KeyError\n"
            # Fixed: the KeyError example is df3.merge(df5, ...) — df4 has no
            # 'index-3' level (see the commented-out call below).
            f" >>> df3.merge(df5, left_on='index-2', right_on='index-3')\n"
            # f"{df3.merge(df5, left_on='index-2', right_on='index-3')}\n"
            " ......\n"
            "KeyError: None of [index-2] are in the columns\n"

            )

        return

    def data_un_concat(self):
        # only for DtaFrame
        """
        concat(objs: Union[Iterable[~FrameOrSeries], Mapping[Union[Hashable, NoneType], ~FrameOrSeries]], axis=0,
               join='outer', ignore_index: bool = False, keys=None, levels=None, names=None,
               verify_integrity: bool = False, sort: bool = False, copy: bool = True)

               -> Union[ForwardRef('DataFrame'), ForwardRef('Series')]

            Concatenate pandas objects along a particular axis with optional set logic
            along the other axes.

            Can also add a layer of hierarchical indexing on the concatenation axis,
            which may be useful if the labels are the same (or overlapping) on
            the passed axis number.

            Parameters
            ----------
            objs : a sequence or mapping of Series or DataFrame objects
                If a mapping is passed, the sorted keys will be used as the `keys`
                argument, unless it is passed, in which case the values will be
                selected (see below). Any None objects will be dropped silently unless
                they are all None in which case a ValueError will be raised.
            axis : {0/'index', 1/'columns'}, default 0
                The axis to concatenate along.
            join : {'inner', 'outer'}, default 'outer'
                How to handle indexes on other axis (or axes).
            ignore_index : bool, default False
                If True, do not use the index values along the concatenation axis. The
                resulting axis will be labeled 0, ..., n - 1. This is useful if you are
                concatenating objects where the concatenation axis does not have
                meaningful indexing information. Note the index values on the other
                axes are still respected in the join.
            keys : sequence, default None
                If multiple levels passed, should contain tuples. Construct
                hierarchical index using the passed keys as the outermost level.
            levels : list of sequences, default None
                Specific levels (unique values) to use for constructing a
                MultiIndex. Otherwise they will be inferred from the keys.
            names : list, default None
                Names for the levels in the resulting hierarchical index.
            verify_integrity : bool, default False
                Check whether the new concatenated axis contains duplicates. This can
                be very expensive relative to the actual data concatenation.
            sort : bool, default False
                Sort non-concatenation axis if it is not already aligned when `join`
                is 'outer'.
                This has no effect when ``join='inner'``, which already preserves
                the order of the non-concatenation axis.

                .. versionadded:: 0.23.0
                .. versionchanged:: 1.0.0

                   Changed to not sort by default.

            copy : bool, default True
                If False, do not copy data unnecessarily.

            Returns
            -------
            object, type of objs
                When concatenating all ``Series`` along the index (axis=0), a
                ``Series`` is returned. When ``objs`` contains at least one
                ``DataFrame``, a ``DataFrame`` is returned. When concatenating along
                the columns (axis=1), a ``DataFrame`` is returned.

            See Also
            --------
            Series.append : Concatenate Series.
            DataFrame.append : Concatenate DataFrames.
            DataFrame.join : Join DataFrames using indexes.
            DataFrame.merge : Merge DataFrames by indexes or columns.

            Notes
            -----
            The keys, levels, and names arguments are all optional.

            A walkthrough of how this method fits in with other tools for combining
            pandas objects can be found `here
            <https://pandas.pydata.org/pandas-docs/stable/user_guide/merging.html>`__.

            Examples
            --------
            Combine two ``Series``.

            >>> s1 = pd.Series(['a', 'b'])
            >>> s2 = pd.Series(['c', 'd'])
            >>> pd.concat([s1, s2])
            0    a
            1    b
            0    c
            1    d
            dtype: object

            Clear the existing index and reset it in the result
            by setting the ``ignore_index`` option to ``True``.
            >>> pd.concat([s1, s2], ignore_index=True)
            0    a
            1    b
            2    c
            3    d
            dtype: object

            Add a hierarchical index at the outermost level of
            the data with the ``keys`` option.

            >>> pd.concat([s1, s2], keys=['s1', 's2'])
            s1  0    a
                1    b
            s2  0    c
                1    d
            dtype: object

            Label the index keys you create with the ``names`` option.

            >>> pd.concat([s1, s2], keys=['s1', 's2'],
            ...           names=['Series name', 'Row ID'])
            Series name  Row ID
            s1           0         a
                         1         b
            s2           0         c
                         1         d
            dtype: object

            Combine two ``DataFrame`` objects with identical columns.

            >>> df1 = pd.DataFrame([['a', 1], ['b', 2]],
            ...                    columns=['letter', 'number'])
            >>> df1
              letter  number
            0      a       1
            1      b       2
            >>> df2 = pd.DataFrame([['c', 3], ['d', 4]],
            ...                    columns=['letter', 'number'])
            >>> df2
              letter  number
            0      c       3
            1      d       4
            >>> pd.concat([df1, df2])
              letter  number
            0      a       1
            1      b       2
            0      c       3
            1      d       4

            Combine ``DataFrame`` objects with overlapping columns
            and return everything. Columns outside the intersection will
            be filled with ``NaN`` values.

            >>> df3 = pd.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
            ...                    columns=['letter', 'number', 'animal'])
            >>> df3
              letter  number animal
            0      c       3    cat
            1      d       4    dog
            >>> pd.concat([df1, df3], sort=False)
              letter  number animal
            0      a       1    NaN
            1      b       2    NaN
            0      c       3    cat
            1      d       4    dog

            Combine ``DataFrame`` objects with overlapping columns
            and return only those that are shared by passing ``inner`` to
            the ``join`` keyword argument.

            >>> pd.concat([df1, df3], join="inner")
              letter  number
            0      a       1
            1      b       2
            0      c       3
            1      d       4

            Combine ``DataFrame`` objects horizontally along the x axis by
            passing in ``axis=1``.

            >>> df4 = pd.DataFrame([['bird', 'polly'], ['monkey', 'george']],
            ...                    columns=['animal', 'name'])
            >>> pd.concat([df1, df4], axis=1)
              letter  number  animal    name
            0      a       1    bird   polly
            1      b       2  monkey  george

            Prevent the result from including duplicate index values with the
            ``verify_integrity`` option.

            >>> df5 = pd.DataFrame([1], index=['a'])
            >>> df5
               0
            a  1
            >>> df6 = pd.DataFrame([2], index=['a'])
            >>> df6
               0
            a  2
            >>> pd.concat([df5, df6], verify_integrity=True)
            Traceback (most recent call last):
                ...
            ValueError: Indexes have overlapping values: ['a']

        """

        # notes for levels
        """
        关于levels的使用：
        levels与keys参数一起使用。当levels保留为默认值None时，Pandas将获取结果的每个级别的唯一值MultiIndex，
        并将其用作结果index.levels属性中使用的对象。
        
        Levels：级别。序列列表，默认值None。
        用于构造MultiIndex（唯一值）。否则，它们将从键中推断出来。
        
        如果Pandas已经推断出这些水平应该是什么，那么我们有什么优势来指定它？我将展示一个示例，
        并让您自己思考为什么这可能有用的其他原因。
        
        例如，根据文档，levels参数是序列列表。这意味着我们可以使用另一个pandas.Index作为其中一个序列。
        
        考虑作为df串联的数据框d1，d2并且d3：
        df1:
        
        
        df = pd.concat(
            [d1, d2, d3], axis=1,
            keys=['First', 'Second', 'Fourth'])
        
        df
        
          First             Second           Fourth
              A    B    C      B    C    D      A    B    D
        1   NaN  NaN  NaN    0.4  0.5  0.6    0.7  0.8  0.9
        2   0.1  0.2  0.3    0.4  0.5  0.6    NaN  NaN  NaN
        3   0.1  0.2  0.3    NaN  NaN  NaN    0.7  0.8  0.9
        
        列对象的级别为：
        print(df, *df.columns.levels, sep='\n')
        Index(['First', 'Second', 'Fourth'], dtype='object')
        Index(['A', 'B', 'C', 'D'], dtype='object')
        
        如果我们sum在一个内部使用groupby我们得到：
        df.groupby(axis=1, level=0).sum()
           First  Fourth  Second
        1    0.0     2.4     1.5
        2    0.6     0.0     1.5
        3    0.6     2.4     0.0
        
        但是，如果除了['First', 'Second', 'Fourth']， 还有其他类别'Third','Fifth'，
        希望将它们也包含在groupby聚合的结果中，怎么办呢？
        可以设置枚举类型索引pandas.CategoricalIndex，并赋值给levels参数。
        
        以如下方式重新concat：
        cats = ['First', 'Second', 'Third', 'Fourth', 'Fifth']
        cindex = pd.CategoricalIndex(cats, categories=cats, ordered=True)
        df = pd.concat(
            [d1, d2, d3], axis=1,
            keys=['First', 'Second', 'Fourth'],
            levels=[cindex]
            )
        
        df
           First  Fourth  Second
        1    0.0     2.4     1.5
        2    0.6     0.0     1.5
        3    0.6     2.4     0.0
        
        column第一级为：
        df.columns.levels[0]
        CategoricalIndex(['First', 'Second', 'Third', 'Fourth', 'Fifth'],
            			  categories=['First', 'Second', 'Third', 'Fourth', 'Fifth'],
            			  ordered=True, dtype='category')
        groupby结果为：
        df.groupby(axis=1, level=0).sum()
           First  Second  Third  Fourth  Fifth
        1    0.0     1.5    0.0     2.4    0.0
        2    0.6     1.5    0.0     0.0    0.0
        3    0.6     0.0    0.0     2.4    0.0
        """

        df1 = pd.DataFrame({'a': range(3), 'b': range(2, 5), 'c': ['apple', 'orange', 'peach']},
                           index=[1, 2, 3])
        df2 = pd.DataFrame({'a': range(20, 23), 'b': [20, 30, 40]},
                           index=[2, 3, 4])

        df3 = pd.DataFrame({'a': range(3, 6), 'b': [30, 40, 50], 'f': ['apple', 'orange', 'peach']},
                           index=pd.MultiIndex.from_product([[3, 4, 5], ['u']]))
        df4 = pd.DataFrame({'a': range(4, 7), 'b': [40, 50, 60]},
                           index=pd.MultiIndex.from_product([[4, 5, 6], ['v']]))
        df5 = pd.DataFrame({'a': range(5, 8), 'b': [50, 60, 70]},
                           index=pd.MultiIndex.from_product([[5, 6, 7], ['u'], ['v']]))

        catindex = pd.CategoricalIndex(['df1', 'df2', 'df3', 'df4'])

        df_concat = pd.concat([df1, df2], keys=['df1', 'df2'], axis=1, levels=[catindex])

        # fail on pandas 1.2.1 !!!
        # pass on pandas 1.1.5 vvv
        dfr_group = df_concat.groupby(axis=1, level=0)
        # dfr_sum = dfr_group.sum()

        print(
            f" >>> df1\n"
            f"{df1}\n"
            f" >>> df2\n"
            f"{df2}\n"
            
            f"{'-'*80}\n"
            f"# 垂直连接：axis=0\n"
            f" >>> pd.concat([df1, df2])\n"
            f"{pd.concat([df1, df2])}\n"

            f"{'-'*80}\n"
            f"# 水平连接：设置axis=1，沿列向进行合并（缺省为join='outer'方式）\n"
            f" >>> pd.concat([df1, df2], axis=1)\n"
            f"{pd.concat([df1, df2], axis=1)}\n"
            
            f"{'-' * 80}\n"
            f"# 水平连接：设置join=’inner‘, 只合并共有索引值数据\n"
            f" >>> pd.concat([df1, df2], axis=1)\n"
            f"{pd.concat([df1, df2], axis=1, join='inner')}\n"
            
            f"{'-' * 80}\n"
            f"# 水平方向连接, 设置keys可以产生多层索引，避免标签值重复\n"
            f" >>> pd.concat([df1, df2], keys=['df1', 'df2'], axis=1)\n"
            f"{pd.concat([df1, df2], keys=['df1', 'df2'], axis=1)}\n"

            f"{'-'*80}\n"
            f"# 如果索引标签有重复，设置verify_integrity=True检测时，会触发异常\n"
            " >>> pd.concat([df1, df2], axis=1, verify_integrity=True)\n"
            # f"{pd.concat([df1, df2], axis=1, verify_integrity=True)}\n"
            "ValueError: Indexes have overlapping values: Index(['a', 'b'], dtype='object')\n"
            
            f"{'-'*80}\n"
            f"# 设置keys后，可以使用levels设置keys索引层的枚举值域\n"
            f" >>> pd.concat([df1, df2], keys=['df1', 'df2'], axis=1)\n"
            f"{pd.concat([df1, df2], keys=['df1', 'df2'], axis=1, levels=[pd.Index(['df1', 'df2', 'df3', 'df4'])])}\n"
            
            f"{'-.'*40}\n"
            f"# keys的索引层标签值的值域（category）定义为['df1', 'df2', 'df3', 'df4']\n"
            f" >>> df_concat = pd.concat([df1, df2], keys=['df1', 'df2'], axis=1, \\ \n"
            f" ...                       levels=[pd.CategoricalIndex(['df1', 'df2', 'df3', 'df4'])])\n"
            f" >>> df_concat.columns.levels\n"
            f"{df_concat.columns.levels}\n"
            
            f"{'-.'*40}\n"
            f"# 使用分组统计时，可以看到索引标签值使用了keys的值域设置（Pandas 1.2.X不再支持）\n"
            f" >>> df_concat.groupby(axis=1, level=0).sum()\n"
            f"{dfr_group.sum()}\n"

            f"{'='*80}\n"
            f"# 多层索引数据集连接\n"
            f" >>> df3\n"
            f"{df3}\n"
            f" >>> df4\n"
            f"{df4}\n"
            f" >>> df5\n"
            f"{df5}\n"
            
            f"{'-.'*40}\n"
            f"# 多层索引，层次数相同时，按照层次对齐连接\n"
            f" >>> pd.concat([df3, df4])\n"
            f"{pd.concat([df3, df4])}\n"
            
            f"{'-.'*40}\n"
            f"# 单层、多层索引之间，多层索引以元组方式作为单层索引连接\n"
            f" >>> pd.concat([df3, df4])\n"
            f"{pd.concat([df1, df5])}\n"
            
            f"{'-.'*40}\n"
            f"# 多层索引之间，层次数不同, 按照最少层次数对齐连接\n"
            f" >>> pd.concat([df3, df5])\n"
            f"{pd.concat([df3, df5])}\n"

            f"{'-.' * 40}\n"
            f"# 多层索引层次数相同时，可以增加各数据集的索引关键字, 置于最索引最内层，即层次0\n"
            f" >>> pd.concat([df3, df4], keys=['ind-df3', 'ind-df4'])\n"
            f"{pd.concat([df3, df4], keys=['ind-df3', 'ind-df4'])}\n"

            f"{'-.' * 40}\n"
            f"# 多层索引层次数不同，设置keys增加各数据集的索引关键字, 将触发异常\n"
            f" >>> pd.concat([df3, df4], keys=['ind-df3', 'ind-df4'])\n"
            # f"{pd.concat([df3, df5], keys=['ind-df3', 'ind-df4'])}\n"
            "AssertionError: Cannot concat indices that do not have the same number of levels\n"

            # more complex keys leves setting
            # f"{'-' * 80}\n"
            # f"# 设置keys后，可以使用levels设置keys索引层的枚举值域\n"
            # f" >>> pd.concat([df3, df4], keys=['no3', 'no4'], axis=1,
            # levels=[pd.Index(['df1', 'df2', 'df3', 'df4'])])\n"
            # f"{pd.concat([df1, df2], keys=[('df1', 1), ('df2', 2)],
            # axis=1, levels=[pd.Index(['df1', 'df2', 'df3', 'df4']), pd.Index([1, 2])])}\n"
        )

        return

    def data_un_join(self):
        """
        join(self, other, on=None, how='left', lsuffix='', rsuffix='', sort=False)

            Join columns of another DataFrame.

            Join columns with other DataFrame either on index or on a key
            column. Efficiently Join multiple DataFrame objects by index at once by
            passing a list.

            Parameters
            ----------
            other : DataFrame, Series with name field set, or list of DataFrame
                Index should be similar to one of the columns in this one. If a
                Series is passed, its name attribute must be set, and that will be
                used as the column name in the resulting joined DataFrame
            on : name, tuple/list of names, or array-like
                Column or index level name(s) in the caller to join on the index
                in `other`, otherwise joins index-on-index. If multiple
                values given, the `other` DataFrame must have a MultiIndex. Can
                pass an array as the join key if it is not already contained in
                the calling DataFrame. Like an Excel VLOOKUP operation
            how : {'left', 'right', 'outer', 'inner'}, default: 'left'
                How to handle the operation of the two objects.

                * left: use calling frame's index (or column if on is specified)
                * right: use other frame's index
                * outer: form union of calling frame's index (or column if on is
                  specified) with other frame's index, and sort it
                  lexicographically
                * inner: form intersection of calling frame's index (or column if
                  on is specified) with other frame's index, preserving the order
                  of the calling's one
            lsuffix : string
                Suffix to use from left frame's overlapping columns
            rsuffix : string
                Suffix to use from right frame's overlapping columns
            sort : boolean, default False
                Order result DataFrame lexicographically by the join key. If False,
                the order of the join key depends on the join type (how keyword)

            Notes
            -----
            on, lsuffix, and rsuffix options are not supported when passing a list
            of DataFrame objects

            Support for specifying index levels as the `on` parameter was added
            in version 0.23.0

            Examples
            --------
            >>> caller = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
            ...                        'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})

            >>> caller
                A key
            0  A0  K0
            1  A1  K1
            2  A2  K2
            3  A3  K3
            4  A4  K4
            5  A5  K5

            >>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
            ...                       'B': ['B0', 'B1', 'B2']})

            >>> other
                B key
            0  B0  K0
            1  B1  K1
            2  B2  K2

            Join DataFrames using their indexes.

            >>> caller.join(other, lsuffix='_caller', rsuffix='_other')

            >>>     A key_caller    B key_other
                0  A0         K0   B0        K0
                1  A1         K1   B1        K1
                2  A2         K2   B2        K2
                3  A3         K3  NaN       NaN
                4  A4         K4  NaN       NaN
                5  A5         K5  NaN       NaN


            If we want to join using the key columns, we need to set key to be
            the index in both caller and other. The joined DataFrame will have
            key as its index.

            >>> caller.set_index('key').join(other.set_index('key'))

            >>>      A    B
                key
                K0   A0   B0
                K1   A1   B1
                K2   A2   B2
                K3   A3  NaN
                K4   A4  NaN
                K5   A5  NaN

            Another option to join using the key columns is to use the on
            parameter. DataFrame.join always uses other's index but we can use any
            column in the caller. This method preserves the original caller's
            index in the result.

            >>> caller.join(other.set_index('key'), on='key')

            >>>     A key    B
                0  A0  K0   B0
                1  A1  K1   B1
                2  A2  K2   B2
                3  A3  K3  NaN
                4  A4  K4  NaN
                5  A5  K5  NaN


            See also
            --------
            DataFrame.merge : For column(s)-on-columns(s) operations
            Returns
            -------
            joined : DataFrame
        """

        df1 = pd.DataFrame({'a': range(3), 'b': range(2, 5), 'c': ['apple', 'orange', 'peach']},
                           index=[1, 2, 3])
        df2 = pd.DataFrame({'a': range(20, 23), 'b': [20, 30, 40]},
                           index=[2, 3, 4])

        df3 = pd.DataFrame({'a': range(3, 6), 'b': [30, 40, 50], 'f': ['apple', 'orange', 'peach']},
                           index=pd.MultiIndex.from_product([[3, 4, 5], ['u']]))
        df3.index.names = ['index-1', 'index-2']

        # df4 = pd.DataFrame({'a': range(4, 7), 'b': [40, 50, 60]},
        #                    index=pd.MultiIndex.from_product([[4, 5, 6], ['v']]))
        # df5 = pd.DataFrame({'a': range(5, 8), 'b': [50, 60, 70]},
        #                    index=pd.MultiIndex.from_product([[5, 6, 7], ['u'], ['v']]))

        sr = pd.Series([1000, 2000, 3000], name='myseries')

        print(
            "# 示例数据集\n"
            " >>> df1\n"
            f"{df1}\n"
            f"{'-'*30}\n"           
            " >>> df2\n"
            f"{df2}\n"
            f"{'-'*30}\n"           
            f" >>> df3\n"
            f"{df3}\n"
            f"{'-'*30}\n"           
            f" >>> sr\n"
            f"{sr}\n"

            f"{'-'*80}\n"           
            f"# 使用内连接方式(缺省)， 设置重复列的后缀名\n"
            f" >>> df1.join(df2, lsuffix='_left', rsuffix='_right')\n"
            f"{df1.join(df2, lsuffix='_left', rsuffix='_right')}\n"

            f"{'-'*80}\n"           
            f"# 使用外连接方式， 索引合并为并集\n"
            f" >>> df1.join(df2, lsuffix='_left', rsuffix='_right', how='outer')\n"
            f"{df1.join(df2, lsuffix='_left', rsuffix='_right', how='outer')}\n"
            
            f"{'-'*80}\n"
            f"# 使用多层索引的层次作为连接键， 设置on指定调用数据集的多层索引层次名称\n"
            f" >>> df3.join(df1, on='index-1', lsuffix='_left', rsuffix='_right')\n"
            f"{df3.join(df1, on='index-1', lsuffix='_left', rsuffix='_right')}\n"
            
            f"{'-'*80}\n"
            f"# 连接具有名称属性的Series数据集\n"
            f" >>> df1.join(sr)\n"
            f"{df1.join(sr)}\n"
            
            f"{'-'*80}\n"
            f"# 如果数据集中有重复的列，则需要明确设置左、右数据集列的后缀名，否则会触发异常\n"
            f" >>> df1.join(df2)\n"
            # f"{df1.join(df2)}\n"
            f"# ValueError: columns overlap but no suffix specified: Index(['a', 'b'], dtype='object')\n"
        )

        return

    def data_un_update(self):
        """Demonstrate in-place updates via ``Series.update`` / ``DataFrame.update``.

        The two string blocks below are reference excerpts from the pandas
        documentation for ``update`` (Series form first, then the DataFrame
        form); the executable demo follows them.
        """
        # update for Series
        """
        update(self, other) -> None
            Modify Series in place using values from passed Series.

            Uses non-NA values from passed Series to make updates. Aligns
            on index.

            Parameters
            ----------
            other : Series, or object coercible into Series

            Examples
            --------
            >>> s = pd.Series([1, 2, 3])
            >>> s.update(pd.Series([4, 5, 6]))
            >>> s
            0    4
            1    5
            2    6
            dtype: int64

            >>> s = pd.Series(['a', 'b', 'c'])
            >>> s.update(pd.Series(['d', 'e'], index=[0, 2]))
            >>> s
            0    d
            1    b
            2    e
            dtype: object

            >>> s = pd.Series([1, 2, 3])
            >>> s.update(pd.Series([4, 5, 6, 7, 8]))
            >>> s
            0    4
            1    5
            2    6
            dtype: int64

            If ``other`` contains NaNs the corresponding values are not updated
            in the original Series.

            >>> s = pd.Series([1, 2, 3])
            >>> s.update(pd.Series([4, np.nan, 6]))
            >>> s
            0    4
            1    2
            2    6
            dtype: int64

            ``other`` can also be a non-Series object type
            that is coercible into a Series

            >>> s = pd.Series([1, 2, 3])
            >>> s.update([4, np.nan, 6])
            >>> s
            0    4
            1    2
            2    6
            dtype: int64

            >>> s = pd.Series([1, 2, 3])
            >>> s.update({1: 9})
            >>> s
            0    1
            1    9
            2    3
            dtype: int64
        """

        # update for DateFrame
        """
        update(self, other, join='left', overwrite=True, filter_func=None, errors='ignore')
            
            -> 'None'
         
            Modify in place using non-NA values from another DataFrame.

            Aligns on indices. There is no return value.

            Parameters
            ----------
            other : DataFrame, or object coercible into a DataFrame
                Should have at least one matching index/column label
                with the original DataFrame. If a Series is passed,
                its name attribute must be set, and that will be
                used as the column name to align with the original DataFrame.
            join : {'left'}, default 'left'
                Only left join is implemented, keeping the index and columns of the original object.
            overwrite : bool, default True
                How to handle non-NA values for overlapping keys:

                * True: overwrite original DataFrame's values
                  with values from `other`.
                * False: only update values that are NA in
                  the original DataFrame.

            filter_func : callable(1d-array) -> bool 1d-array, optional
                Can choose to replace values other than NA. Return True for values
                that should be updated.
            errors : {'raise', 'ignore'}, default 'ignore'
                If 'raise', will raise a ValueError if the DataFrame and `other`
                both contain non-NA data in the same place.

                .. versionchanged:: 0.24.0
                   Changed from `raise_conflict=False|True`
                   to `errors='ignore'|'raise'`.

            Returns
            -------
            None : method directly changes calling object

            Raises
            ------
            ValueError
                * When `errors='raise'` and there's overlapping non-NA data.
                * When `errors` is not either `'ignore'` or `'raise'`
            NotImplementedError
                * If `join != 'left'`

            See Also
            --------
            dict.update : Similar method for dictionaries.
            DataFrame.merge : For column(s)-on-column(s) operations.

            Examples
            --------
            >>> df = pd.DataFrame({'A': [1, 2, 3],
            ...                    'B': [400, 500, 600]})
            >>> new_df = pd.DataFrame({'B': [4, 5, 6],
            ...                        'C': [7, 8, 9]})
            >>> df.update(new_df)
            >>> df
               A  B
            0  1  4
            1  2  5
            2  3  6

            The DataFrame's length does not increase as a result of the update,
            only values at matching index/column labels are updated.

            >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
            ...                    'B': ['x', 'y', 'z']})
            >>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
            >>> df.update(new_df)
            >>> df
               A  B
            0  a  d
            1  b  e
            2  c  f

            For Series, its name attribute must be set.

            >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
            ...                    'B': ['x', 'y', 'z']})
            >>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
            >>> df.update(new_column)
            >>> df
               A  B
            0  a  d
            1  b  y
            2  c  e
            >>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
            ...                    'B': ['x', 'y', 'z']})
            >>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
            >>> df.update(new_df)
            >>> df
               A  B
            0  a  x
            1  b  d
            2  c  e

            If `other` contains NaNs the corresponding values are not updated
            in the original dataframe.

            >>> df = pd.DataFrame({'A': [1, 2, 3],
            ...                    'B': [400, 500, 600]})
            >>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
            >>> df.update(new_df)
            >>> df
               A      B
            0  1    4.0
            1  2  500.0
            2  3    6.0
        """

        # Demo fixtures: frames/series with scattered NaNs so that the effect
        # of overwrite / filter_func is visible in the printed output.
        # NOTE: np.nan (not the np.NaN alias, which NumPy 2.0 removed).
        df1 = pd.DataFrame({'a': [np.nan, 1, 2],
                            'b': [2, np.nan, 4],
                            'c': ['apple', np.nan, 'peach']},
                           index=[1, 2, 3])
        df2 = pd.DataFrame({'a': range(200, 203), 'b': [np.nan, 300, 400], 'c': ['banana', 'apricot', 'litchi']},
                           index=[1, 2, 3])

        df3 = pd.DataFrame({'a': range(300, 303), 'b': [300, 400, 500], 'c': ['watermelon', 'mulberry', np.nan]},
                           index=[1, 2, 3])

        df4 = pd.DataFrame({'a': range(300, 303), 'b': [300, np.nan, 500]},
                           index=[1, 2, 3])

        sr1 = pd.Series([1000, 2000, 3000, 4000], name='a', index=[1, 2, 3, 4])
        sr2 = pd.Series(['cherry', 'grape', 'plum', 'coconut'], name='c', index=[1, 2, 3, 4])
        sr3 = pd.Series([5000, 5002, 5003, 5004], name='b', index=[1, 2, 3, 4])

        def check_overwrite(x):
            # filter_func demo: mark for update the values that are > 300 or NaN.
            return np.array([v > 300 or np.isnan(v) for v in x])

        print(
            "# DataFrame数据集更新\n"
            f"{'-'*30}\n"
            " >>> df1\n"
            f"{df1}\n"
            f"{'-'*30}\n"
            " >>> df2\n"
            f"{df2}\n"

            f"{'-'*30}\n"
            f"# 设置overwrite=False，仅更新NaN值数据\n"
            f" >>> df1.update(df2, overwrite=False)\n"
            f"{df1.update(df2, overwrite=False)}\n"
            f" >>> df1\n"
            f"{df1}\n"

            f"{'-'*30}\n"
            f"# 缺省设置overwrite=True，使用df2中非NaN数据更新df1\n"
            f" >>> df1.update(df2)\n"
            f"{df1.update(df2)}\n"
            f"# >>> df1\n"
            f"{df1}\n"

            f"{'-'*80}\n"
            f"# 从Series数据集更新DataFrame数据\n"
            f"{'-'*30}\n"
            f" >>> df3\n"
            f"{df3}\n"
            f" >>> sr1\n"
            f"{sr1}\n"
            f"{'-'*30}\n"
            f" >>> df3.update(sr1)\n"
            f"{df3.update(sr1)}\n"
            f" >>> df3\n"
            f"{df3}\n"

            f"# 更新Series数据集\n"
            f" >>> sr2\n"
            f"{sr2}\n"
            f"{'-'*30}\n"
            " >>> sr2.update({0: 10000, 1: 20000})\n"
            f"{sr2.update({0: 10000, 1: 20000})}\n"
            f" >>> sr2\n"
            f"{sr2}\n"

            f"{'-'*80}\n"
            f"# 使用过滤函数， 限制仅更新大于300或为NaN值的数据\n"
            f"# --- 原数据集\n"
            f" >>> df4\n"
            f"{df4}\n"
            f" >>> sr3\n"
            f"{sr3}\n"
            f"# --- 定义过滤函数\n"
            # The echoed definition below now matches the real function above
            # (the original echoed a function named `check` with a different body).
            f" >>> def check_overwrite(x):\n"
            f"         return np.array([v > 300 or np.isnan(v) for v in x])\n"
            f"# --- 设置过滤函数， 从sr3更新df4的有关数据\n"
            f" >>> df4.update(sr3, filter_func=check_overwrite)\n"
            f"{df4.update(sr3, filter_func=check_overwrite)}\n"
            f"# --- 更新了数据列b中NaN值和等于500的数据\n"
            f" >>> df4\n"
            f"{df4}"
        )

        return

    def data_un_combine(self):
        """
        combine(self, other: 'DataFrame', func, fill_value=None, overwrite=True)
               -> 'DataFrame'

            Perform column-wise combine with another DataFrame.

            Combines a DataFrame with `other` DataFrame using `func`
            to element-wise combine columns. The row and column indexes of the
            resulting DataFrame will be the union of the two.

            Parameters
            ----------
            other : DataFrame
                The DataFrame to merge column-wise.
            func : function
                Function that takes two series as inputs and return a Series or a
                scalar. Used to merge the two dataframes column by columns.
            fill_value : scalar value, default None
                The value to fill NaNs with prior to passing any column to the
                merge func.
            overwrite : bool, default True
                If True, columns in `self` that do not exist in `other` will be
                overwritten with NaNs.

            Returns
            -------
            DataFrame
                Combination of the provided DataFrames.

            See Also
            --------
            DataFrame.combine_first : Combine two DataFrame objects and default to
                non-null values in frame calling the method.

            Examples
            --------
            Combine using a simple function that chooses the smaller column.

            >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
            >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
            >>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
            >>> df1.combine(df2, take_smaller)
               A  B
            0  0  3
            1  0  3

            Example using a true element-wise combine function.

            >>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
            >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
            >>> df1.combine(df2, np.minimum)
               A  B
            0  1  2
            1  0  3

            Using `fill_value` fills Nones prior to passing the column to the
            merge function.

            >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
            >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
            >>> df1.combine(df2, take_smaller, fill_value=-5)
               A    B
            0  0 -5.0
            1  0  4.0

            However, if the same element in both dataframes is None, that None
            is preserved

            >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
            >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
            >>> df1.combine(df2, take_smaller, fill_value=-5)
                A    B
            0  0 -5.0
            1  0  3.0

            Example that demonstrates the use of `overwrite` and behavior when
            the axis differ between the dataframes.

            >>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
            >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
            >>> df1.combine(df2, take_smaller)
                 A    B     C
            0  NaN  NaN   NaN
            1  NaN  3.0 -10.0
            2  NaN  3.0   1.0

            >>> df1.combine(df2, take_smaller, overwrite=False)
                 A    B     C
            0  0.0  NaN   NaN
            1  0.0  3.0 -10.0
            2  NaN  3.0   1.0

            Demonstrating the preference of the passed in dataframe.

            >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
            >>> df2.combine(df1, take_smaller)
               A    B   C
            0  0.0  NaN NaN
            1  0.0  3.0 NaN
            2  NaN  3.0 NaN

            >>> df2.combine(df1, take_smaller, overwrite=False)
                 A    B   C
            0  0.0  NaN NaN
            1  0.0  3.0 1.0
            2  NaN  3.0 1.0
        """

        # for Series
        """
        combine(self, other, func, fill_value=None) -> 'Series'
            Combine the Series with a Series or scalar according to `func`.
        
            Combine the Series and `other` using `func` to perform elementwise
            selection for combined Series.
            `fill_value` is assumed when value is missing at some index
            from one of the two objects being combined.
        
            Parameters
            ----------
            other : Series or scalar
                The value(s) to be combined with the `Series`.
            func : function
                Function that takes two scalars as inputs and returns an element.
            fill_value : scalar, optional
                The value to assume when an index is missing from
                one Series or the other. The default specifies to use the
                appropriate NaN value for the underlying dtype of the Series.
        
            Returns
            -------
            Series
                The result of combining the Series with the other object.
        
            See Also
            --------
            Series.combine_first : Combine Series values, choosing the calling
                Series' values first.
            Examples
            --------
            Consider 2 Datasets ``s1`` and ``s2`` containing
            highest clocked speeds of different birds.
        
            >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0})
            >>> s1
            falcon    330.0
            eagle     160.0
            dtype: float64
            >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0})
            >>> s2
            falcon    345.0
            eagle     200.0
            duck       30.0
            dtype: float64
        
            Now, to combine the two datasets and view the highest speeds
            of the birds across the two datasets
        
            >>> s1.combine(s2, max)
            duck        NaN
            eagle     200.0
            falcon    345.0
            dtype: float64
        
            In the previous example, the resulting value for duck is missing,
            because the maximum of a NaN and a float is a NaN.
            So, in the example, we set ``fill_value=0``,
            so the maximum value returned will be the value from some dataset.
        
            >>> s1.combine(s2, max, fill_value=0)
            duck       30.0
            eagle     200.0
            falcon    345.0
            dtype: float64
        """

        df1 = pd.DataFrame({'a': [1, None, 2], 'b': [None, 20, 30]})
        df2 = pd.DataFrame({'b': [100, 200, 300, 400], 'c': [None, 2000, 3000, None]})
        sr1 = pd.Series([None, None, 50, 500], index=[1, 2, 3, 4])
        sr2 = pd.Series([100, None, None, 120, 180], index=[1, 2, 3, 4, 5])

        def choose_series(s1, s2):
            return [v1+v2 if v1 > v2 else v1-v2 for v1, v2 in zip(s1, s2)]

        def choose(s1, s2):
            return s1+1000 if s1 > s2 else s2+2000

        def choose2(s1, s2):
            result = s2
            if np.isnan(s1):
                result = 1000
            elif np.isnan(s2):
                result = 2000
            return result

        print(
            f"# 逐列组合合并DataFrame数据集\n"
            f" >>> df1\n"
            f"{df1}\n"
            f" >>> df2\n"
            f"{df2}\n"
            f"{'-'*80}\n"
            f" >>> df1.combine(df2, func=np.maximum)\n"
            f"{df1.combine(df2, func=np.maximum)}\n"
            
            f"# 设置overwrite=False, 对缺失数据不做NaN填充\n"
            f" >>> df1.combine(df2, func=np.maximum, overwrite=False)\n"
            f"{df1.combine(df2, func=np.maximum, overwrite=False)}\n"
            
            f"# 设置fill_value=1000, 对缺失数据进行处理前填充\n"
            f" >>> df1.combine(df2, func=np.maximum, overwrite=False, fill_value=1000)\n"
            f"{df1.combine(df2, func=np.maximum, overwrite=False, fill_value=1000)}\n"
            
            f"# 设置自定义函数， 函数以Series数据作为输入进行处理， 返回一维序列\n"
            f"\n"
            f" >>> def choose_series(s1, s2):\n"
            f"         return [v1+1000 if v1 > v2 else v2+2000 for v1, v2 in zip(s1, s2)]\n"
            f"\n"
            f" >>> df1.combine(df2, func=choose_series, overwrite=False)\n"
            f"{df1.combine(df2, func=choose_series, overwrite=False)}\n"

            f"{'-'*80}\n"
            f"# 逐行组合合并Series数据集\n"
            f" >>> sr1\n"
            f"{sr1}\n"
            f" >>> sr2\n"
            f"{sr2}\n"
            # f"{'-'*80}\n"
            f"# 使用自定义函数， 输入数据为标量\n"
            f"\n"
            f" >>> def choose(x1, x2):\n"
            f"         return x1+1000 if x1 > x2 else x2+2000\n"
            f"\n"
            f" >>> sr1.combine(sr2, func=choose)\n"
            f"{sr1.combine(sr2, func=choose)}\n"

            # f" >>> sr1.combine(sr2, func=choose2, fill_value=600)\n"
            # f"{sr1.combine(sr2, func=choose2, fill_value=600)}\n"
        )

        return

    def data_un_combine_first(self):
        """Demonstrate ``combine_first`` for Series and DataFrame.

        ``caller.combine_first(other)`` patches the null elements of
        ``caller`` with the value found at the same label in ``other`` and
        returns a new object; the result's index (and columns, for
        DataFrames) is the union of both operands.  A position stays null
        only when neither operand supplies a non-null value there.

        See also ``Series.combine`` / ``DataFrame.combine`` for the
        func-driven variants of this operation.
        """
        # Frames with partially overlapping rows/columns and scattered NaNs.
        df1 = pd.DataFrame({'a': [1, None, 2], 'b': [None, 20, 30]})
        df2 = pd.DataFrame({'b': [100, 200, 300, 400], 'c': [None, 2000, 3000, None]})
        # Series with partially overlapping indexes.
        sr1 = pd.Series([1, None, None])
        sr2 = pd.Series([100, None, 300], index=[1, 2, 3])

        rule = '-' * 80
        # combine_first leaves its operands untouched, so evaluating the
        # results up front does not affect the echoed inputs below.
        frame_filled = df1.combine_first(df2)
        series_filled = sr1.combine_first(sr2)

        print(
            f"# 组合合并DataFrame数据集\n"
            f" >>> df1\n"
            f"{df1}\n"
            f" >>> df2\n"
            f"{df2}\n"
            f"{rule}\n"
            f" >>> df1.combine_first(df2)\n"
            f"{frame_filled}\n"
            f"{rule}\n"
            f"# 组合合并Series数据集\n"
            f" >>> sr1\n"
            f"{sr1}\n"
            f" >>> sr2\n"
            f"{sr2}\n"
            f" >>> sr1.combine_first(sr2)\n"
            f"{series_filled}"
        )

        return
