# coding: utf8

import numpy as np
import pandas as pd


class QueryDemo:

    def __init__(self):
        self.df = pd.DataFrame(
            np.arange(60, 84).reshape(12, 2),
            columns=['math', 'science'],
            index=pd.MultiIndex.from_product(
            [[1, 2, 3], ['t1', 't2', 't3', 't4']]),
            dtype=np.uint8)

    def query(self):
        df = self.df

        print(' >>> df\n'
              f'{df}')

        print(" --- query typical examples ---")

        print(" # 引用列名进行数据检索查询：df.query('math > 65 and science < 75')\n"
              f"{df.query('math > 65 and science < 75')}")

        print(" # 支持算术运算符和逻辑运算符\n"
              "df.query('math % 5 == 0')\n"
              f"{df.query('math % 5 == 0')}")

        score = 75
        print(" # 引用外部变量查询\n"
              " >>> score = 75\n"
              " >>> df.query('math > 65 and science < @score')\n"
              f"{df.query('math > 65 and science < @score')}")

        sum_score = df['math'] + df['science']
        print(" # 引用列数据运算结果\n"
              "sum_score = df.math + df.science\n"
              "df.query('@sum_score < 150')\n"
              f"{df.query('@sum_score < 155 and math > 65')}")

        print(" # 引用列数据之间运算结果\n"
              " >>> df.query('math > 65 and math < science*0.65')\n"
              f"df.query('math > 65 and math+science > 150')")

        mean_score = df.mean().max()
        print("引用数据集本身时，使用外部变量方式\n"
              "mean_score = df.mean().max()\n"
              "df.query('math > @mean_score')\n"
              f"{df.query('math > @mean_score')}")

        print("--- 在表达式中使用索引 ---\n"
              "单索引时，在表达式中引用索引，可以使用index作为变量\n"
              f"{df.unstack()}\n"
              " >>> df.query('index < 3')\n"
              f"{df.unstack().query('index < 3')}\n"
              " # 当索引没有指定名称时，可以使用ilevel_[level]: \n"
              " >>> df.query('ilevel_0 < 3'\n"
              f"{df.unstack().query('ilevel_0 < 3')}")

        print("多索引时，引用索引层次名称作为变量\n"
              """在多索引时引用层次名，未命名层次名时使用ilevel_[level]：\n
              >>> df.query('ilevel_1 < "t2"')\n"""
              f"""{df.query('ilevel_1 < "t2"')}""")

        df.index.names = ['num', 'syntax']
        print("命名层次名后，在表达式中引用层次名作为列变量\n"
              "df.index.names = ['num', 'syntax']\n"
              """df.query('num < 3 and syntax > "t2"')\n"""
              f'''{df.query('num < 3 and syntax > "t2"')}''')

        print(" # ==、!= 用于list时， 等同于 in, not in\n"
              " # 使用：df.query('math == [72, 66]')\n"
              " # 等同：df.query('math in [72, 66]')\n"
              " # 等同：df.query('[72, 66] in math')\n"
              f"{df.query('math == [72, 66]')}\n"
              " # 使用：df.query('math != [60, 62, 64, 66, 78, 80]')\n"
              " # 等同：df.query('math not in [60, 62, 64, 66, 78, 80]')\n"
              f"{df.query('math != [60, 62, 64, 66, 78, 80]')}")

        df.science = df.science - pd.Series(list(range(len(df.science))), index=df.science.index)
        print("\n列运算\n"
              f"{df}\n"
              " >>> df.science = df.science - pd.Series(list(range(len(df.science))), index=df.science.index)\n"
              " >>> df.query('math in science')\n"
              f"{df.query('math in science')}")
        return

    def query_test(self, test_size=10000):
        # test_size = 1000000
        df = pd.DataFrame(
            data=np.random.random((test_size, 3)),
            columns=list('abc')
        )
        print(f"{'df.values.nbytes={}'.format(df.values.nbytes)}")

        import time

        # syntax loc
        st = time.time()
        df1 = df.loc[(df.a < 0.5) & (df.b > 0.5) & (df.c < 0.3)]
        print("loc consume: {:.6f}, df len={}".format(time.time()-st, len(df1)))

        # syntax query
        st = time.time()
        df1 = df.query("a < 0.5 and b > 0.5 and c < 0.3")
        # print(time.time()-st)
        print("query consume: {:.6f}".format(time.time()-st))

    def filter(self):
        df = self.df.unstack()

        print_str = (\
            ' >>> df\n'
            f'{df}\n'
            " --- filter typical examples ---\n"
    
            " >>> df.filter(items=[('math', 't2'), ('science', 't3')])\n"
            f"{df.filter(items=[('math', 't2'), ('science', 't3')])}\n"
            
    
            " >>> df.filter(items=[1, 2], axis=0)\n"
            f"{df.filter(items=[1, 2], axis=0)}\n"
    
            " >>> df.filter(like='sc')\n"
            f"{df.filter(like='sc')}\n"
    
            " >>> df.filter(like='t2', axis=1)\n"
            f"{df.filter(like='t2', axis=1)}\n"
    
            " >>> df.filter(like='1', axis=0)\n"
            f"{df.filter(like='1', axis=0)}\n"
    
            " >>> df.filter(like=str(('math', 't2')), axis=1)\n"
            f"{df.filter(like=str(('math', 't2')), axis=1)}\n"
    
            " >>> df.filter(regex='.*[23]')\n"
            f"{df.filter(regex='.*[23]')}\n"
    
            " >>> df.filter(regex='(sc.*[2])|(m.*[1])')\n"
            f"{df.filter(regex='(sc.*[2])|(m.*[1])')}\n"
    
            " >>> df.filter(regex='2', axis=0)\n"
            f"{df.filter(regex='2', axis=0)}\n"
    
            """ >>> df.filter(regex="^[(][']m.*[3]['][)]$")\n"""
            f"""{df.filter(regex="^[(][']m.*[3]['][)]$")}\n"""
        )

        print(print_str)

        return df

    def select_dtypes(self):
        import datetime
        gender_type = pd.CategoricalDtype(['male', 'female'], ordered=True)
        dt = datetime.datetime
        genders = pd.Series(['male', 'female', 'male'], dtype=gender_type)
        df = pd.DataFrame(
            data={'name': ['Xu', 'He', 'Li'],
                  'gender': genders,
                  'age': [18, 20, 19],
                  'birthday': [dt(2002, 3, 15), dt(2000, 10, 10), dt(2001, 9, 1)],
                  'weight': [71.5, 68.3, 77.9],
                  'syntax': [True, False, False],
                  'other': [120, 'sport', np.NaN]
                  }
            )

        print(
            f"{'='*80}\n"
            " >>> df\n"
            f"{df}\n"
            " >>> df.info()\n"
            f"{df.info()}"

            f'{"-"*80}\n'
            "# \n"
            " >>> df.select_dtypes(include=['int'])\n"
            f"{df.select_dtypes(include=['int'])}\n"

            f'{"-"*80}\n'
            "# \n"
            " >>> df.select_dtypes(include=['int64'])\n"
            f"{df.select_dtypes(include=['int64'])}\n"

            f'{"-"*80}\n'
            "# \n"
            " >>> df.select_dtypes(include=[np.int64])"
            f"{df.select_dtypes(include=[np.int64])}\n"

            f'{"-"*80}\n'
            "# \n"
            " >>> df.select_dtypes(include=['object', 'category'])\n"
            f"{df.select_dtypes(include=['object', 'category'])}\n"
    
            f'{"-"*80}\n'
            "# \n"
            " >>> df.select_dtypes(include=['object', 'datetime64[ns]', bool], exclude=['float64'])\n"
            f"{df.select_dtypes(include=['object', 'datetime64[ns]', bool], exclude=['float64'])}\n"
    
            f'{"-"*80}\n'
            "# \n"
            ">>> df.select_dtypes(include=[np.datetime64, 'category'], exclude=['float64', bool])\n"
            f"{df.select_dtypes(include=[np.datetime64, 'category'], exclude=['float64', bool])}\n"

            # print("="*80)
            )
        return df

    def eval(self):
        """
        eval(self, expr, inplace=False, **kwargs)
            Evaluate a string describing operations on DataFrame columns.

            Operates on columns only, not specific rows or elements.  This allows
            `eval` to run arbitrary code, which can make you vulnerable to code
            injection if you pass user input to this function.

            Parameters
            ----------
            expr : str
                The expression string to evaluate.
            inplace : bool, default False
                If the expression contains an assignment, whether to perform the
                operation inplace and mutate the existing DataFrame. Otherwise,
                a new DataFrame is returned.
            **kwargs
                See the documentation for :func:`eval` for complete details
                on the keyword arguments accepted by
                :meth:`~pandas.DataFrame.query`.

            Returns
            -------
            ndarray, scalar, pandas object, or None
                The result of the evaluation or None if ``inplace=True``.

            See Also
            --------
            DataFrame.query : Evaluates a boolean expression to query the columns
                of a frame.
            DataFrame.assign : Can evaluate an expression or function to create new
                values for a column.
            eval : Evaluate a Python expression as a string using various
                backends.

            Notes
            -----
            For more details see the API documentation for :func:`~eval`.
            For detailed examples see :obj:`enhancing performance with eval
            <enhancingperf.eval>`.
            Examples
            --------
            >>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
            >>> df
               A   B
            0  1  10
            1  2   8
            2  3   6
            3  4   4
            4  5   2
            >>> df.eval('A + B')
            0    11
            1    10
            2     9
            3     8
            4     7
            dtype: int64

            Assignment is allowed though by default the original DataFrame is not
            modified.
            >>> df.eval('C = A + B')
               A   B   C
            0  1  10  11
            1  2   8  10
            2  3   6   9
            3  4   4   8
            4  5   2   7
            >>> df
               A   B
            0  1  10
            1  2   8
            2  3   6
            3  4   4
            4  5   2

            Use ``inplace=True`` to modify the original DataFrame.

            >>> df.eval('C = A + B', inplace=True)
            >>> df
               A   B   C
            0  1  10  11
            1  2   8  10
            2  3   6   9
            3  4   4   8
            4  5   2   7

            Multiple columns can be assigned to using multi-line expressions:

            >>> df.eval(
            ...     '''
            ... C = A + B
            ... D = A - B
            ... '''
            ... )
               A   B   C  D
            0  1  10  11 -9
            1  2   8  10 -6
            2  3   6   9 -3
            3  4   4   8  0
            4  5   2   7  3
        """

        # module method
        """
        eval(expr, parser='pandas', engine: Union[str, NoneType] = None, truediv=<object object at 0x000001CCEBC8B3D0>, 
            local_dict=None, global_dict=None, resolvers=(), level=0, target=None, inp
            lace=False)
            Evaluate a Python expression as a string using various backends.
        
            The following arithmetic operations are supported: ``+``, ``-``, ``*``,
            ``/``, ``**``, ``%``, ``//`` (python engine only) along with the following
            boolean operations: ``|`` (or), ``&`` (and), and ``~`` (not).
            Additionally, the ``'pandas'`` parser allows the use of :keyword:`and`,
            :keyword:`or`, and :keyword:`not` with the same semantics as the
            corresponding bitwise operators.  :class:`~pandas.Series` and
            :class:`~pandas.DataFrame` objects are supported and behave as they would
            with plain ol' Python evaluation.
        
            Parameters
            ----------
            expr : str
                The expression to evaluate. This string cannot contain any Python
                `statements
                <https://docs.python.org/3/reference/simple_stmts.html#simple-statements>`__,
                only Python `expressions
                <https://docs.python.org/3/reference/simple_stmts.html#expression-statements>`__.
            parser : {'pandas', 'python'}, default 'pandas'
                The parser to use to construct the syntax tree from the expression. The
                default of ``'pandas'`` parses code slightly different than standard
                Python. Alternatively, you can parse an expression using the
                ``'python'`` parser to retain strict Python semantics.  See the
                :obj:`enhancing performance <enhancingperf.eval>` documentation for
                more details.
            engine : {'python', 'numexpr'}, default 'numexpr'
        
                The engine used to evaluate the expression. Supported engines are
        
                - None         : tries to use ``numexpr``, falls back to ``python``
                - ``'numexpr'``: This default engine evaluates pandas objects using
                                 numexpr for large speed ups in complex expressions
                                 with large frames.
                - ``'python'``: Performs operations as if you had ``eval``'d in top
                                level python. This engine is generally not that useful.
        
                More backends may be available in the future.
        
            truediv : bool, optional
                Whether to use true division, like in Python >= 3.
        
                .. deprecated:: 1.0.0
        
            local_dict : dict or None, optional
                A dictionary of local variables, taken from locals() by default.
            global_dict : dict or None, optional
                A dictionary of global variables, taken from globals() by default.
            resolvers : list of dict-like or None, optional
                A list of objects implementing the ``__getitem__`` special method that
                you can use to inject an additional collection of namespaces to use for
                variable lookup. For example, this is used in the
                :meth:`~DataFrame.query` method to inject the
                ``DataFrame.index`` and ``DataFrame.columns``
                variables that refer to their respective :class:`~pandas.DataFrame`
                instance attributes.
            level : int, optional
                The number of prior stack frames to traverse and add to the current
                scope. Most users will **not** need to change this parameter.
            target : object, optional, default None
                This is the target object for assignment. It is used when there is
                variable assignment in the expression. If so, then `target` must
                support item assignment with string keys, and if a copy is being
                returned, it must also support `.copy()`.
            inplace : bool, default False
                If `target` is provided, and the expression mutates `target`, whether
                to modify `target` inplace. Otherwise, return a copy of `target` with
                the mutation.
        
            Returns
            -------
            ndarray, numeric scalar, DataFrame, Series, or None
                The completion value of evaluating the given code or None if ``inplace=True``.
        
            Raises
            ------
            ValueError
                There are many instances where such an error can be raised:
        
                - `target=None`, but the expression is multiline.
                - The expression is multiline, but not all them have item assignment.
                  An example of such an arrangement is this:
        
                  a = b + 1
                  a + 2
        
                  Here, there are expressions on different lines, making it multiline,
                  but the last line has no variable assigned to the output of `a + 2`.
                - `inplace=True`, but the expression is missing item assignment.
                - Item assignment is provided, but the `target` does not support
                  string item assignment.
                - Item assignment is provided and `inplace=False`, but the `target`
                  does not support the `.copy()` method
        
            See Also
            --------
            DataFrame.query : Evaluates a boolean expression to query the columns
                    of a frame.
            DataFrame.eval : Evaluate a string describing operations on
                    DataFrame columns.
        
            Notes
            -----
            The ``dtype`` of any objects involved in an arithmetic ``%`` operation are
            recursively cast to ``float64``.
        
            See the :obj:`enhancing performance <enhancingperf.eval>` documentation for
            more details.
        
            Examples
            --------
            >>> df = pd.DataFrame({"animal": ["dog", "pig"], "age": [10, 20]})
            >>> df
              animal  age
            0    dog   10
            1    pig   20
        
            We can add a new column using ``pd.eval``:
        
            >>> pd.eval("double_age = df.age * 2", target=df)
              animal  age  double_age
            0    dog   10          20
            1    pig   20          40
        """

        df1 = pd.DataFrame(
            data={'A': [1, 2, 3], 'B': [10, 20, 30], 'C': [100, 200, 300]}
        )
        df2 = pd.DataFrame(
            data={'A': [1, 1, 1], 'B': [2, 2, 2], 'C': [3, 3, 3]}
        )
        df3 = pd.DataFrame(
            data={'A': [2, 2, 2], 'B': [10, 100, 1000], 'C': [1, 2, 3]}
        )

        def fun(df):
            return df.apply(lambda y: y**2)

        scalar = 100

        # r = pd.eval("df.A * scalar", local_dict={'df': df, 'scalar': scalar})
        # return r
        # print(df.eval("A+B"))
        # print(locals())

        _value = 100
        print(
            # f"{'df' in locals()}\n"
            # f"{'df' in globals().keys()}\n"

            f" >>> df1\n"
            f"{df1}\n\n"
            f" >>> df2\n"
            f"{df2}\n\n"
            f" >>> df3\n"
            f"{df3}\n\n"

            "# 不能使用外部函数\n"
            " >>> fun = lambda x: x.apply(lambda v: v**2)\n"
            " >>> fun(df1)\n"
            f"{fun(df1)}\n"
            f" >>> fun(df1.A)\n"
            f"{fun(df1.A)}\n"
            " >>> df1.eval('fun(A)')\n"
            # f"{df1.eval('fun(A)')}\n"
            f'ValueError: "fun" is not a supported function\n'
            " >>> pd.eval('fun(df1.A)')\n"
            # f"{pd.eval('fun(df1.A)')}\n"
            f'ValueError: "fun" is not a supported function\n\n'
            
            "# 算术运算\n"
            "# 在一些环境中，调用的外部变量不在locals中，需要进行loca_dict设置\n"
            " >>> pd.eval('df1.A * scalar + df1.B', local_dict={'df1': df1, 'scalar': scalar})\n"
            f"{pd.eval('df1.A * scalar + df1.B', local_dict={'df1': df1, 'scalar': scalar})}\n\n"
            
            f" >>> df1.eval('A > 1 and B < 30 and C > 50')\n"
            f"{df1.eval('A > 1 and B < 30 and C > 50')}\n\n"
            
            f"{'='*80}\n"
            "# 算术运算\n"
            f"{'-'*80}\n"
            "# 数据集算术运算\n"
            " >>> pd.eval('df1 + df2/df3)')\n"
            f"{pd.eval('df1 + df2/df3')}\n"

            f"{'-'*80}\n"
            "# 列算术运算\n"
            " >>> pd.eval('df1.A + df2.B/df3.C)')\n"
            f"{pd.eval('df1.A + df2.B/df3.C')}\n"
            ""

            f"{'='*80}\n"
            "# 逻辑运算\n"

            f"{'-'*80}\n"
            f"# 数据集逻辑运算\n"
            " >>> pd.eval('df1 > df2 or df2 > df3')\n"
            f"{pd.eval('df1 > df2 or df2 > df3')}\n"

            f"{'-.'*40}\n"
            f"# 列逻辑运算\n"
            " >>> _value = 100\n"
            " >>> pd.eval('df1.A < 3 | df1.C > _value ')\n"
            f"{pd.eval('df1.A < 3 | df1.C > _value', local_dict={'_value': 100, 'df1': df1})}\n"

            # f"{'='*80}\n"
            # "# 不支持位运算\n"
            # f"{'-'*80}\n"
            # f"# \n"
            # " >>> 'df1.A & 1\n"
            # f"{pd.eval('df1.A & 1')}\n"
            
            f"# 设置target=df1和inplace=True，在数据集中df1生成列D，并改变原数据集\n"
            f" >>> pd.eval('D = (df1.A + df1.B + df1.C)/3', target=df1, inplace=True)\n"
            f"{pd.eval('D = (df1.A + df1.B + df1.C)/3', target=df1, inplace=True)}"
            f">>> df1\n"
            f"{df1}"
        )
        return df1

    # move from data_view

    def data_head_tail(self):
        df = self.df_list

        print(" >>> df\n"
              f"{df}"
              " >>> df.head(2)\n"
              f"{df.head(2)}\n"
              " >>> df.tail(2)\n"
              f" {df.tail(2)}")
        return

    def data_sample(self):
        """
        sample(self: ~FrameOrSeries, n=None, frac=None, replace=False, weights=None, random_state=None, axis=None)
              -> ~FrameOrSeries

            Return a random sample of items from an axis of object.

            You can use `random_state` for reproducibility.

            Parameters
            ----------
            n : int, optional
                Number of items from axis to return. Cannot be used with `frac`.
                Default = 1 if `frac` = None.
            frac : float, optional
                Fraction of axis items to return. Cannot be used with `n`.
            replace : bool, default False
                Allow or disallow sampling of the same row more than once.
            weights : str or ndarray-like, optional
                Default 'None' results in equal probability weighting.
                If passed a Series, will align with target object on index. Index
                values in weights not found in sampled object will be ignored and
                index values in sampled object not in weights will be assigned
                weights of zero.
                If called on a DataFrame, will accept the name of a column
                when axis = 0.
                Unless weights are a Series, weights must be same length as axis
                being sampled.
                If weights do not sum to 1, they will be normalized to sum to 1.
                Missing values in the weights column will be treated as zero.
                Infinite values not allowed.
            random_state : int, array-like, BitGenerator, np.random.RandomState, optional
                If int, array-like, or BitGenerator (NumPy>=1.17), seed for
                random number generator
                If np.random.RandomState, use as numpy RandomState object.

                .. versionchanged:: 1.1.0

                    array-like and BitGenerator (for NumPy>=1.17) object now passed to
                    np.random.RandomState() as seed
            axis : {0 or ‘index’, 1 or ‘columns’, None}, default None
                Axis to sample. Accepts axis number or name. Default is stat axis
                for given data type (0 for Series and DataFrames).

            Returns
            -------
            Series or DataFrame
                A new object of same type as caller containing `n` items randomly
                sampled from the caller object.

            See Also
            --------
            DataFrameGroupBy.sample: Generates random samples from each group of a
                DataFrame object.
            SeriesGroupBy.sample: Generates random samples from each group of a
                Series object.
            numpy.random.choice: Generates a random sample from a given 1-D numpy
                array.

            Notes
            -----
            If `frac` > 1, `replacement` should be set to `True`.

            Examples
            --------
            >>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
            ...                    'num_wings': [2, 0, 0, 0],
            ...                    'num_specimen_seen': [10, 2, 1, 8]},
            ...                   index=['falcon', 'dog', 'spider', 'fish'])
            >>> df
                    num_legs  num_wings  num_specimen_seen
            falcon         2          2                 10
            dog            4          0                  2
            spider         8          0                  1
            fish           0          0                  8

            Extract 3 random elements from the ``Series`` ``df['num_legs']``:
            Note that we use `random_state` to ensure the reproducibility of
            the examples.

            >>> df['num_legs'].sample(n=3, random_state=1)
            fish      0
            spider    8
            falcon    2
            Name: num_legs, dtype: int64

            A random 50% sample of the ``DataFrame`` with replacement:

            >>> df.sample(frac=0.5, replace=True, random_state=1)
                  num_legs  num_wings  num_specimen_seen
            dog          4          0                  2
            fish         0          0                  8

            An upsample sample of the ``DataFrame`` with replacement:
            Note that `replace` parameter has to be `True` for `frac` parameter > 1.

            >>> df.sample(frac=2, replace=True, random_state=1)
                    num_legs  num_wings  num_specimen_seen
            dog            4          0                  2
            fish           0          0                  8
            falcon         2          2                 10
            falcon         2          2                 10
            fish           0          0                  8
            dog            4          0                  2

            Using a DataFrame column as weights. Rows with larger value in the
            `num_specimen_seen` column are more likely to be sampled.

            >>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
                    num_legs  num_wings  num_specimen_seen
            falcon         2          2                 10
            fish           0          0                  8
        """
        data = np.random.randint(0, 10, (10, 3))
        df = pd.DataFrame(data=data, columns=list('ABC'))
        for j in range(len(data)):
            df.iloc[j] += j * 100
        print(
            " >>> df\n"
            f"{df}\n"
            " # 缺省情况下axis=0，随机抽取数据行，设置n=3抽取3行数据\n"
            " >>> df.sample(3)\n"
            f"{df.sample(3)}\n"

            f" # 设置axis=1，沿列向抽取，设置n=2，随机抽取2列数据\n"
            " >>> df.sample(2, axis=1)\n"
            f"{df.sample(2, axis=1)}\n"

            f" # 设置frac按比例随机抽取数据行, 同时设置random_stat保证多次抽样相同\n"
            " >>> df.sample(frac=0.2, random_stat=1)\n"
            f"{df.sample(frac=0.2, random_state=1)}\n"
            " >>> df.sample(frac=0.2, random_stat=1)\n"
            f"{df.sample(frac=0.2, random_state=1)}\n"

            f" # 设置weight为列名，按该列的值为权重，随机抽取数据行\n"
            " >>> df.sample(frac=1.2, weight='B', replace=True)\n"
            f"{df.sample(frac=1.2, weights='B', replace=True)}\n"

            f" # 设置weight为数组，按该数组值为权重，设置axis=1和n=2，随机抽取2列数据\n"
            " >>> df.sample(n=2, weight=[1, 3, 5], axis=1)\n"
            f"{df.sample(n=2, weights=[1, 3, 7], axis=1, random_state=2)}\n"
        )
        return

    def data_xs(self):
        """
        xs(self, key, axis=0, level=None, drop_level: 'bool_t' = True)

            Return cross-section from the Series/DataFrame.

            This method takes a `key` argument to select data at a particular
            level of a MultiIndex.

            Parameters
            ----------
            key : label or tuple of label
                Label contained in the index, or partially in a MultiIndex.
            axis : {0 or 'index', 1 or 'columns'}, default 0
                Axis to retrieve cross-section on.
            level : object, defaults to first n levels (n=1 or len(key))
                In case of a key partially contained in a MultiIndex, indicate
                which levels are used. Levels can be referred by label or position.
            drop_level : bool, default True
                If False, returns object with same levels as self.

            Returns
            -------
            Series or DataFrame
                Cross-section from the original Series or DataFrame
                corresponding to the selected index levels.

            See Also
            --------
            DataFrame.loc : Access a group of rows and columns
                by label(s) or a boolean array.
            DataFrame.iloc : Purely integer-location based indexing
                for selection by position.

            Notes
            -----
            `xs` can not be used to set values.

            MultiIndex Slicers is a generic way to get/set values on
            any level or levels.
            It is a superset of `xs` functionality, see
            :obj:`MultiIndex Slicers <advanced.mi_slicers>`.

            Examples
            --------
            >>> d = {'num_legs': [4, 4, 2, 2],
            ...      'num_wings': [0, 0, 2, 2],
            ...      'class': ['mammal', 'mammal', 'mammal', 'bird'],
            ...      'animal': ['cat', 'dog', 'bat', 'penguin'],
            ...      'locomotion': ['walks', 'walks', 'flies', 'walks']}
            >>> df = pd.DataFrame(data=d)
            >>> df = df.set_index(['class', 'animal', 'locomotion'])
            >>> df
                                       num_legs  num_wings
            class  animal  locomotion
            mammal cat     walks              4          0
                   dog     walks              4          0
                   bat     flies              2          2
            bird   penguin walks              2          2

            Get values at specified index

            >>> df.xs('mammal')
                               num_legs  num_wings
            animal locomotion
            cat    walks              4          0
            dog    walks              4          0
            bat    flies              2          2

            Get values at several indexes

            >>> df.xs(('mammal', 'dog'))
                        num_legs  num_wings
            locomotion
            walks              4          0

            Get values at specified index and level

            >>> df.xs('cat', level=1)
                               num_legs  num_wings
            class  locomotion
            mammal walks              4          0

            Get values at several indexes and levels

            >>> df.xs(('bird', 'walks'),
            ...       level=[0, 'locomotion'])
                     num_legs  num_wings
            animal
            penguin         2          2

            Get values at specified column and axis

            >>> df.xs(('bird', 'walks'),
            ...       level=[0, 'locomotion'])
                     num_legs  num_wings
            animal
            penguin         2          2

            Get values at specified column and axis

            >>> df.xs('num_wings', axis=1)
            class   animal   locomotion
            mammal  cat      walks         0
                    dog      walks         0
                    bat      flies         2
            bird    penguin  walks         2
            Name: num_wings, dtype: int64
        """
        from ch4data import sec0_datasets as dds
        df = dds.DataSet.df5
        df = df.stack()
        df1 = df.unstack().unstack()

        # NOTE(review): list-valued ``key`` (e.g. key=[2, 'math']) is
        # deprecated in newer pandas in favour of tuple keys — confirm the
        # target pandas version before modernising these calls.
        print(
            " # 在多层索引中，指定层次标签值进行数据选取\n"
            " >>> df\n"
            f"{df}\n"
            " # 指定标签值进行检索\n"
            " >>> df.xs((1, 't3'))\n"
            f"{df.xs((1, 't3'))}\n"
            " # 跨层次指定标签值进行检索\n"
            " >>> df.xs(key=[2, 'math'], level=[0, 2])\n"
            f"{df.xs(key=[2, 'math'], level=[0, 2])}\n"

            f"{'-' * 80}\n"
            " # 指定列标签值进行检索\n"
            " >>> df1\n"
            f"{df1}\n"
            # trailing \n added: this label previously ran into the next one
            f" # 不保留level所指定的索引层的标签\n"
            " >>> df1.xs(key=['science'], level=0, axis=1)\n"
            f"{df1.xs(key=['science'], level=0, axis=1)}\n"

            f"{'-' * 80}\n"
            " # 保留level所指定的索引层的标签\n"
            " >>> df1.xs(key=['science'], level=0, axis=1, drop_level=False)\n"
            f"{df1.xs(key=['science'], level=0, axis=1, drop_level=False)}"
        )

        return

    def data_get(self):
        """Demonstrate ``DataFrame.get``: label lookup with a default.

        ``get(key, default=None)`` returns the column(s) for *key*, or the
        *default* object when the key is absent (here the ``ValueError``
        class itself is used as a sentinel default, so its repr is printed).
        """
        frame = pd.DataFrame(data=np.random.randint(1, 9, (5, 3)),
                             columns=list('ABC'))

        # One print per logical line; output is identical to a single
        # newline-joined print of the same pieces.
        print(" >>> df")
        print(frame)

        print(" >>> df.get('B')")
        print(frame.get('B'))

        # Missing key -> the supplied default is returned, not raised.
        print(" >>> df.get('D', ValueError)")
        print(frame.get('D', ValueError))

        # A list key selects multiple columns (order as given).
        print(" >>> df.get(['C', 'A'], ValueError)")
        print(frame.get(['C', 'A'], ValueError))

        return

    def data_take(self):
        """Demonstrate ``DataFrame.take``: selection by *position*.

        Unlike ``loc``, ``take(indices, axis=...)`` ignores index labels and
        selects by the element's physical position along the axis (``axis=0``
        rows, ``axis=1`` columns). Negative positions count from the end,
        like Python lists. See also ``DataFrame.iloc`` and ``numpy.take``.
        """
        # NOTE(review): self.df_list is not assigned in the visible __init__
        # (which only sets self.df) — confirm it is set elsewhere before this
        # demo runs.
        frame = self.df_list

        # Sequential prints reproduce the original single joined print.
        print(" >>> df")
        print(frame)

        print(" # 按照位置选取行数据")
        print(" >>> df.take([1, 0])")
        print(frame.take([1, 0]))

        print(" # 按照位置选取列数据")
        print(" >>> df.take([2, 0], axis=1)")
        print(frame.take([2, 0], axis=1))

        print(" # 按照位置选取行、列数据")
        print(" >>> df.take([2, 0], axis=1).take([1, 0], axis=0)")
        print(frame.take([2, 0], axis=1).take([1, 0], axis=0))

        return


class QueryDemo2:
    """query examples

    Demonstrates ``DataFrame.query`` / ``DataFrame.eval``:

    - python vs numexpr evaluation of the same filter
    - referencing local variables with ``@``
    - referencing the index / index levels (``index``, ``ilevel_<n>``)
    - list membership (``==``/``!=`` against a list, ``in`` / ``not in``)
    - negating boolean columns (``not`` / ``~``)
    """

    def __init__(self):
        # 3x4 frame of 0..11, columns A-D, used by most of the demos below.
        self.df = pd.DataFrame(np.arange(12).reshape(3, 4), columns=list('ABCD'))

    def query_eval_cmp(self):
        """Compare plain boolean indexing with pd.eval, query and numexpr."""
        df = self.df
        # Example 1.1: python way vs numexpr way for the same filter.
        print("df")
        print(df)
        print("df[(df.A < 8) & (df.B < 9)]")
        result1 = df[(df.A < 8) & (df.B < 9)]                  # python way
        print(result1)
        print("pd.eval('df[(df.A < 8) & (df.B < 9)]')")
        result2 = pd.eval('df[(df.A < 8) & (df.B < 9)]')       # numexpr way
        print(result2)
        print("np.allclose(result1, result2)")
        # FIX: the comparison value was computed but never printed.
        print(np.allclose(result1, result2))                   # True

        # Example 1.2: eval vs query.
        # Same: both evaluate the expression.
        # Different: eval returns the boolean mask; query returns the rows
        # selected by that mask.
        import numexpr
        print("df[df.eval('A<8 & B<9')]")
        result3 = df[df.eval('A<8 & B<9')]
        print(result3)
        print("df.query('A < 8 and B < 9')")
        result4 = df.query('A < 8 and B < 9')
        print(result4)
        print("result3.equals(result4)")
        # FIX: show the result; result1 == result2 == result3 == result4.
        print(result3.equals(result4))
        a = df.A
        b = df.B
        # numexpr.evaluate works on plain arrays; the expression string
        # cannot contain attribute access such as df.A.
        result5 = df[numexpr.evaluate('(a<8) &(b < 9)')]
        print("a = df.A\nb = df.B\ndf[numexpr.evaluate('(a<8) &(b < 9)')]")
        print(result5)

    def query_local_var(self):
        """Example 2: reference Python locals in a query via the ``@`` prefix."""
        df = self.df
        Cmean = df['C'].mean()  # 6.0
        by_mask = df[(df.A < Cmean) & (df.B < Cmean)]
        by_query = df.query('A < @Cmean and B < @Cmean')    # equivalent result

    def query_index(self):
        """Reference columns, the index and index levels in expressions."""
        df = self.df

        # Columns only (numexpr engine; A, B, C are column names).
        df.query('(A < B) & (B < C)')

        # Named single index plus columns.
        df.index.names = ['a']
        df.query('a < B and B < C')             # 'a' is the index name; B, C columns
        df.query('index < B < C')               # 'index' always means the index itself

        # NOTE(review): this frame has no column 'a', so 'a > 2' resolves to
        # the index level here; when a column and the index share a name,
        # the column takes priority — consider renaming such columns.
        df.query('a > 2')
        df.query('index > 2')                   # 'index' sidesteps any name clash

        # Unnamed index: address levels as ilevel_<n>.
        df.index.names = [None]
        print("df.index.names = [None]")
        print("df.query('ilevel_0 > 1')")
        print(df.query('ilevel_0 > 1'))         # ilevel_0 = unnamed level 0

    def query_multi_index(self):
        """Reference MultiIndex level names (or ilevel_<n>) in expressions."""
        colors = np.array(['red', 'red', 'blue', 'blue', 'blue', 'blue'], dtype='<U4')
        foods = np.array(['eggs', 'meat', 'eggs', 'meat', 'eggs', 'meat'], dtype='<U4')
        index = pd.MultiIndex.from_arrays([colors, foods], names=['color', 'food'])
        df = pd.DataFrame(np.arange(12).reshape(6, 2), index=index)
        # Named levels are referenced directly by name.
        print(df.query('color == "red"'))
        # Unnamed levels are referenced as ilevel_<level>.
        df.index.names = [None, None]
        print(df.query('ilevel_0 == "red"'))        # level 0
        print(df.query('ilevel_1 == "meat"'))       # level 1

    def query_map_fun(self):
        """Apply one query expression across several frames with map."""
        df1 = pd.DataFrame(np.arange(12).reshape(4, 3), columns=list('abc'))
        df1 = df1 + 10
        df2 = df1 + 10
        expr = '19 <= a <= c <= 22'
        result = list(map(lambda frame: frame.query(expr), [df1, df2]))
        print(result)

    def query_logic_expression(self):
        """Logical operators; all four forms below are equivalent."""
        df = pd.DataFrame(np.random.randint(10, size=(10, 3)), columns=list('ABC'))
        df.query('(A< B) & (B< C)')        # same as df[(df.A < df.B) & (df.B < df.C)]
        df.query('A< B & B < C')           # operator precedence still applies
        df.query('A< B and B < C')         # 'and' is equivalent to '&' here
        df.query('A < B < C')              # chained comparison form

    def query_notin_list(self):
        """Example 6.2: ==/!= against a list behaves like in / not in.

        FIX: the original frame had columns A-C while the expressions
        referenced 'b' and 'c', so every query raised
        UndefinedVariableError. Build the columns the expressions use:
        'b' holds strings, 'c' holds small ints.
        """
        df = pd.DataFrame({'b': list('aabbccddef'),
                           'c': np.random.randint(4, size=10)})
        df.query('b == ["a", "b", "c"]')    # same as df[df.b.isin(["a", "b", "c"])]
        df.query('c == [1, 2]')
        df.query('c != [1, 2]')
        # using in / not in
        df.query('[1, 2] in c')             # same as df[df.c.isin([1, 2])]
        df.query('[1, 2] not in c')         # same as df[~df.c.isin([1, 2])]

    def query_column_notin(self):
        """Example 6.3: in / not in against another column.

        Combined with other clauses this reads very compactly;
        equivalent to: df[df.b.isin(df.a) & (df.c < df.d)].
        """
        df = pd.DataFrame({'a': list('abcdef'), 'b': list('fedfed'), 'c': 5, 'd': 5})
        print(df)

        print("df.query('a in b and c < d')")
        r = df.query('a in b and c < d')
        print(r)

        print("df[df.a.isin(df.b)]")
        result1 = df[df.a.isin(df.b)]
        print(result1)

        print("df.query('a not in b')")
        result2 = df.query('a not in b')
        print(result2)

        print("df[~df.a.isin(df.b)]")
        result3 = df[~df.a.isin(df.b)]      # pure-Python equivalent
        print(result3)

    def query_bool(self):
        """Negate boolean columns with ``not`` / ``~`` in expressions."""
        df = pd.DataFrame(np.arange(9).reshape(3, 3), columns=list('ABC'))
        df['bools'] = df.eval('C >= 5')
        print('df')
        print(df)

        print("df['bools']")
        result11 = df['bools']
        print(result11)
        print("df.eval('C >= 5')")
        print(df.eval('C >= 5'))
        print("df.query('not bools')")
        # FIX: the label says query('not bools') but the code ran
        # df.eval('C >= 5'); execute what the label advertises.
        result2 = df.query('not bools')
        print(result2)
        print("df[~df.bools]")
        print(df[~df.bools])

        # Compound expression: query syntax vs the pure-Python equivalent.
        result3 = df.query('A < B < C and (not bools) or bools > 2')
        result4 = df[(df.A < df.B) & (df.B < df.C) & (~df.bools) | (df.bools > 2)]
        print("df.query('A < B < C and (not bools) or bools > 2')")
        print(result3)
        print("df[(df.A < df.B) & (df.B < df.C) & (~df.bools) | (df.bools > 2)]")
        print(result4)