# coding: utf8

import pandas as pd
import numpy as np

# Shared sample data used by the slicing/selection demos below:
# three student records (student number, name, score).
_student_records = {
    'sno': ['1001', '1002', '1003'],
    'name': ['李明', '张力', '韩中'],
    'score': [95, 88, 98],
}
# df1 uses the default RangeIndex; df2 uses letter labels 'a'..'c'.
# (DataFrame copies the dict data, so sharing the literal is safe.)
df1 = pd.DataFrame(data=_student_records)
df2 = pd.DataFrame(data=_student_records, index=list('abc'))


def dataframe_construct():
    """Demonstrate several ways of constructing a DataFrame.

    Covers construction from an ndarray, from a dict with a unified
    dtype, from a structured array (selecting/reordering columns), and
    with an explicit index.  Prints each example; returns None.
    """
    # From an ndarray.
    data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=np.int8)
    df = pd.DataFrame(data, columns=list('abc'))
    print("# 从列表数据创建DataFrame\n"
          ">>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)], dtype=np.int8)")
    print(">>> df = pd.DataFrame(data, columns=list('abc'))")
    print(">>> df")
    print(df)

    # From a dict, requesting a unified dtype.  Old pandas silently fell
    # back to object for a column it could not cast (the behavior this
    # demo documents); modern pandas raises instead, so fall back
    # explicitly to keep the demo running everywhere.
    try:
        df = pd.DataFrame(data={'col1': [1, 2], 'col2': ['3a', '4a']}, dtype=np.float16)
    except (ValueError, TypeError):
        df = pd.DataFrame(data={'col1': [1, 2], 'col2': ['3a', '4a']})
        df['col1'] = df['col1'].astype(np.float16)
    # BUG FIX: the echoed command below previously showed [3, 4] while the
    # actual code uses ['3a', '4a'].
    print("# 从字典创建DataFrame, 指定统一转换的数据类型，不能转换时转为object\n"
          ">>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': ['3a', '4a']}, dtype=np.float16)")
    print(">>> df")
    print(df)
    print(df.dtypes)

    # From a structured array; columns= selects and reorders fields.
    data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)],
                    dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")])
    df = pd.DataFrame(data, columns=['c', 'a'])
    print(
        """
    # 从结构化数组创建DataFrame
    >>> data = np.array([(1, 2, 3), (4, 5, 6), (7, 8, 9)],
                        dtype=[("a", "i4"), ("b", "i4"), ("c", "i4")])
    >>> df = pd.DataFrame(data, columns=['c', 'a'])
        """
    )
    print(">>> df")
    print(df)

    # With an explicit (non-default) integer index.
    df = pd.DataFrame(
            data={'sno': ['1001', '1002', '1003'],
                  'name': ['李明', '张力', '韩中'],
                  'score': [95, 88, 98]
                  },
            index=[3, 5, 9]
            )
    # BUG FIX: the echo used a full-width comma after the dict and
    # printed "index=[3, 5. 9]" (period) — neither matches the code.
    print("""
    # 指定索引值
    >>> df = pd.DataFrame(
    ...     data={'sno': ['1001', '1002', '1003'],
    ...           'name': ['李明', '张力', '韩中'],
    ...           'score': [95, 88, 98]
    ...           },
    ...     index=[3, 5, 9]
    ...     )""")
    print(df)


def dataframe_slice():
    """Demonstrate the three DataFrame slicing styles.

    (1) Plain subscripting slices by row position only.
    (2) ``iloc`` slices by row and column positions.
    (3) ``loc`` slices by row and column labels.
    Notes: with integer index labels a ``loc`` slice can coincide with a
    positional slice; label slices include both endpoints, while
    subscript/``iloc`` slices exclude the stop position.
    """
    demos = (
        (">>> df1", df1),
        (">>> df1[0:1]", df1[0:1]),
        (">>> df1.iloc[1:2, 1:3]", df1.iloc[1:2, 1:3]),
        (">>> df2", df2),
        (">>> df2.loc['b':'c', 'name':'score']", df2.loc['b':'c', 'name':'score']),
    )
    for caption, frame in demos:
        print(caption)
        print(frame)


def dataframe_select_data():
    """Demonstrate conditional data selection on a DataFrame.

    (1) Subscripting accepts a boolean sequence (typically built from a
        condition expression) to filter rows.
    (2) ``loc`` accepts a boolean row mask plus a list of column names.
    (3) ``query`` filters rows with an expression string.
    (4) ``filter`` selects by labels in three ways: an explicit list,
        a substring (``like``) or a regular expression.
    """
    print(">>> df1")
    print(df1)

    print(">>> df1[(df1.score >= 90)]")
    print(df1[(df1.score >= 90)])

    print(">>> df1.loc[(df1.sno > '1001') & (df1.score > 90), ['name', 'score']]")
    row_mask = (df1.sno > '1001') & (df1.score > 90)
    print(df1.loc[row_mask, ['name', 'score']])

    print(""">>> df1.query("score>=90 and sno>='1002'")""")
    print(df1.query("""score>=90 and sno>='1002'"""))

    print(">>> df1.filter(items=[2], axis=0)")
    print(df1.filter(items=[2], axis=0))

    print(">>> df1.filter(like='n')")
    print(df1.filter(like='n'))


def dataframe_groupby():
    """Demonstrate DataFrame.groupby with mappings, a function and a column.

    Shows grouping rows via an index-label -> key mapping, grouping
    columns via a column -> key mapping (axis=1), grouping by a function
    of the index, and grouping by a column's values.  Returns None.

    NOTE(review): the ``axis`` keyword of ``groupby`` is deprecated in
    pandas >= 2.1 — these calls still run there but emit FutureWarning.
    """
    df = pd.DataFrame(
        data=np.eye(3, 4),
        columns=list('abcd'),
        dtype=np.int8
        )
    print(">>> df")
    print(df)
    # Group rows via an index-label -> group-key mapping.
    g1 = df.groupby(by={0: 'k1', 1: 'k2', 2: 'k2'}, axis=0)
    print(
        ">>> g1 = df.groupby(by={0: 'k1', 1: 'k2', 2: 'k2'}, axis=0)\n"
        ">>> g1.groups\n"
        f"{g1.groups}\n"
        ">>> g1.get_group('k2')\n"
        f"{g1.get_group('k2')}"
    )

    # Group columns via a column-label -> group-key mapping.
    g2 = df.groupby(by={'a': 1, 'b': 1, 'c': 2, 'd': 3}, axis=1)
    print(
        "# 在列方向，使用映射分组\n"
        ">>> g2 = df.groupby(by={'a': 1, 'b': 1, 'c': 2, 'd': 3}, axis=1)\n"
        ">>> g2.groups\n"
        f"{g2.groups}\n"
        ">>> g2.get_group(1)\n"
        f"{g2.get_group(1)}"
    )

    # Group rows by a function applied to each index label.
    g3 = df.groupby(by=lambda x: x % 2)
    print(
        "# 使用函数进行分组\n"
        ">>> g3 = df.groupby(by=lambda x: x % 2)\n"
        ">>> g3.groups\n"
        f"{g3.groups}\n"
    )
    print(g3.get_group(0))

    # Group rows by the values of column 'a'.
    g4 = df.groupby(by='a')
    print(
        "# 根据列值进行分组\n"
        ">>> g4 = df.groupby(by='a')\n"
        ">>> g4.groups\n"
        f"{g4.groups}"
    )
    # BUG FIX: the original looped `for gi in range(len(g4.groups))` and
    # called get_group(gi), which only worked because column 'a' happens
    # to contain exactly the values 0 and 1.  Iterate the real group keys.
    # (A stray duplicate `print(g4.groups)` before the caption was removed.)
    for key, group in g4:
        print(f"group{key}")
        print(group)


def dataframe_split(df, k=2, shuffle=True):
    """Split a dataset into sub-datasets along the row axis.

    :param df: input DataFrame
    :param k: number of equal parts (int, default 2), or a list/tuple of
        fractions summing to 1 giving each part's share of the rows.
    :param shuffle: shuffle row order before splitting (default True).
    :return: list of sub-DataFrames (k parts, or len(k) parts)
    :raises OverflowError: if k is a sequence and sum(k) > 1
    :raises TypeError: if k is neither an int nor a list/tuple
    """
    import itertools
    import math
    import operator

    total = len(df)
    # Compute the cut points (row positions), always starting at row 0.
    if isinstance(k, int):
        cuts = [int(total / k * j) for j in range(k + 1)]
    elif isinstance(k, (list, tuple)):
        # Tolerate float rounding when checking the fractions sum to 1.
        if sum(k) > 1 and not math.isclose(sum(k), 1.0):
            raise OverflowError
        ratios = itertools.accumulate(k, operator.add)
        # BUG FIX: the original omitted the leading 0 cut point, so the
        # first partition was silently dropped for sequence k.
        cuts = [0] + [int(r * total) for r in ratios]
    else:
        raise TypeError("k must be an int or a list/tuple of fractions")
    # Optionally shuffle the row order before splitting.
    shuffled = df
    if shuffle:
        order = list(range(total))
        np.random.shuffle(order)
        # BUG FIX: use positional indexing; the original used .loc with
        # row numbers, which breaks for any non-default (non-Range) index.
        shuffled = df.iloc[order]
    # Slice between consecutive cut points.
    return [shuffled.iloc[start:stop] for start, stop in zip(cuts, cuts[1:])]


def dataframe_merge():
    """merge可以使用类似数据库表的连接方式合并数据集"""
    df1 = pd.DataFrame({'a': ['foo', 'uoo', 'boo', 'foo'],
                        'b': [1, 2, 3, 4],
                        'c': list(range(10, 41, 10))})
    df2 = pd.DataFrame({'a': ['foo', 'woo', 'boo', 'boo'],
                        'b': [100, 200, 300, 400],
                        'd': list(range(10, 41, 10))
                        },
                       index=range(2, 6))
    print(
        ">>> df1\n"
        f"{df1}\n"
        ">>> df2\n"
        f"{df2}\n"
        "# 内连接方式合并\n"
        ">>> np.merge(df1, df2, how='inner', left_on='a', right_on='a')\n"
        f"{pd.merge(df1, df2, how='inner', left_on='a', right_on='a')}\n"
        "# 左连接方式合并\n"
        ">>> np.merge(df1, df2, how='left', left_on='c', right_on='d')\n"
        f"{pd.merge(df1, df2, how='left', left_on='c', right_on='d')}\n"
        ">>> np.merge(df1, df2, how='left', left_index=True, right_index=True)\n"
        f"{pd.merge(df1, df2, how='right', left_index=True, right_index=True)}\n"
        "# 外连接方式合并\n"
        ">>> np.merge(df1, df2, how='outer', left_on='a', right_on='a')\n"
        f"{pd.merge(df1, df2, how='outer', left_on='a', right_on='a')}\n"
    )


def dataframe_join():
    """join为对象方法，以主从方式合并数据集，被连接的数据集只能以索引为键"""
    df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
                       'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
    other = pd.DataFrame({'key': ['K1', 'K3', 'K5'],
                          'B': ['B1', 'B3', 'B5']})
    df_join = df.join(other, lsuffix='_caller', rsuffix='_other')
    print(">>> df")
    print(df)
    print(">>> other")
    print(other)
    print(">>> df_join = df.join(other, lsuffix='_caller', rsuffix='_other')")
    print(df_join)
    df_join2 = df.set_index('key').join(other.set_index('key'))
    print("# 按列作为关键字进行合并时，可以将列转换为索引")
    print(">>> df_join2 = df.set_index('key').join(other.set_index('key'))")
    print(df_join2)
    print("# 通过设置on参数，可以将调用者的列作为关键字进行合并，其它数据集中只能使用索引")
    print(">>> df_join3 = df.join(other.set_index('key')， on='key')")
    df_join3 = df.join(other.set_index('key'), on='key')
    print(df_join3)


def dataframe_concat():
    """
    concat可以连接多个数据集，只能使用索引为键连接，
    concat支持在合并轴向上选择索引值“并”（outer）和“交“（inner）的合并方式
    concat可以检测重复值，即索引和值的重复索引值，阻止合并数据集中出现重复索引值
    可先使用set_index设置索引，再使用reset_index重置索引
    """
    df1 = pd.DataFrame(
        {
            'a': ['A' + str(j) for j in range(4)],
            'b': ['B' + str(j) for j in range(4)],
            'c': ['C' + str(j) for j in range(4)]
        },
        index=range(4)
    )
    df2 = pd.DataFrame(
        {
            'b': ['B' + str(j) for j in range(4)],
            'c': ['C' + str(j) for j in range(4)],
            'd': ['D' + str(j) for j in range(4)]
        },
        index=[2, 3, 6, 7]
    )
    print(df1, '\n', df2)
    dfconcat1 = pd.concat([df1, df2])
    print("# 默认ignore_index=False， 合并后数据集保留原索引")
    print(">>> pd.concat([df1, df2])")
    print(dfconcat1)
    dfconcat2 = pd.concat([df1, df2], ignore_index=True)
    print("# 使用ignore_index=True， 对合并后数据集重置索引")
    print(">>> dfconcat2 = pd.concat([df1, df2], ignore_index=True)")
    print(dfconcat2)
    dfconcat3 = pd.concat([df1, df2], join='inner')
    print("# join='inner'， 合并后数据集非拼接轴向索引求交集, 不会出现NaN值")
    print(">>> pd.concat([df1, df2], join='inner')")
    print(dfconcat3)
    dfconcat4 = pd.concat([df1, df2], axis=1)
    print("# axis=1, 行标签合并方式默认为outer")
    print(">>> pd.concat([df1, df2], axis=1)")
    print(dfconcat4)
    dfconcat5 = pd.concat([df1, df2], axis=1, keys=['df1', 'df2'])
    print("# 设置keys, 增加索引层次，避免标签值重复")
    print(">>> pd.concat([‘df1’, ‘df2’], axis=1)")
    print(dfconcat5)


def datafrmae_update():
    """
    update为对象方法：df1.update(df2)
    使用非NaN值更新主数据集
    目前仅支持left方式，即以主数据集的索引对齐被连接数据集索引
    重叠的行、列索引，使用更新方式，用从数据集的数据更新主数据集数据，使用overwrite选择替换NaN值或所有对应值
    非重叠行索引，保留主数据集行索引，忽略从数据集数据行
    非重叠列索引，保留主数据集列索引，忽略从数据集数据列
    该方法直接改变主数据集

    update(other, join='left', overwrite=True, filter_func=None, errors='ignore')
        other : DataFrame, or object coercible into a DataFrame
        Should have at least one matching index/column label
        with the original DataFrame. If a Series is passed,
        its name attribute must be set, and that will be
        used as the column name to align with the original DataFrame.
    join : {'left'}, default 'left'
        Only left join is implemented, keeping the index and columns of the
        original object.
    overwrite : bool, default True
        How to handle non-NA values for overlapping keys:
        * True: overwrite original DataFrame's values
          with values from `other`.
        * False: only update values that are NA in
          the original DataFrame.

    filter_func : callable(1d-array) -> bool 1d-array, optional
        Can choose to replace values other than NA. Return True for values
        that should be updated.
    errors : {'raise', 'ignore'}, default 'ignore'
        If 'raise', will raise a ValueError if the DataFrame and `other`
        both contain non-NA data in the same place.
    return: None
    """
    df1 = pd.DataFrame(
        {
            'a': ['A' + str(j) for j in range(4)],
            'b': ['B' + str(j) for j in range(4)],
            'c': ['C' + str(j) for j in range(4)]
        },
        index=range(4)
    )
    df2 = pd.DataFrame(
        {
            'b': ['B' + str(j) for j in range(4)],
            'c': ['C' + str(j) for j in range(4)],
            'd': ['D' + str(j) for j in range(4)]
        },
        index=[2, 3, 6, 7]
    )
    print(df1, '\n', df2)
    df1.update(df2)
    print("# 默认ignore_index=False， 合并后数据集保留原索引")
    print(">>> df1.update(df2)")
    print(df1)


class Task:
    """Exercises: combine three student tables with merge/join/concat/stack.

    ``df1`` holds the roster (name, class number), ``df2``/``df3`` hold
    partial score sheets.  Each ``task_*`` method builds the combined
    table, caches it in ``df5``, and ``task_groupby`` groups that result
    by class number.
    """

    # Roster: sno 1001..1004 with name and class number.
    df1 = pd.DataFrame(
        data={'sno': [str(1000+i) for i in range(1, 5)],
              'name': ["张力", "李明", "何同", "孟娥"],
              'classno': [2, 1, 1, 2]}
    )
    # First score sheet: sno 1001..1003.
    df2 = pd.DataFrame(
        data={
            "sno": [str(1000+i) for i in range(1, 4)],
            "score1": [87, 90, 76],
            "score2": [78, 89, 56]
        }
    )
    # Second score sheet: sno 1001, 1002, 1004 (1003 is missing).
    df3 = pd.DataFrame(
        data={
            "sno": [str(1000+i) for i in range(1, 5) if i != 3],
            "score3": [66, 99, 70],
            "score4": [77, 88, 60]
        }
    )
    # Combined result produced by the task_* methods; used by task_groupby.
    df5 = None

    @classmethod
    def task_merge(cls):
        """Combine the three tables with pd.merge (left joins on 'sno')."""
        dfmerge12 = pd.merge(cls.df1, cls.df2, how='left', on='sno')
        dfmerge123 = pd.merge(dfmerge12, cls.df3, how='left', on='sno')
        print(">>> df1, df2, df3")
        print(cls.df1)
        print(cls.df2)
        print(cls.df3)
        print(">>> dfmerge12 = pd.merge(df1, df2, how='left', on='sno')")
        print(dfmerge12)
        print(">>> dfmerge123 = pd.merge(dfmerge12, df3, how='left', on='sno')")
        print(dfmerge123)
        dfmerge123.fillna(0, inplace=True)
        print(dfmerge123)
        # Cache the combined table for task_groupby.
        cls.df5 = dfmerge123

    @classmethod
    def task_join(cls):
        """Combine the three tables with DataFrame.join (outer, on 'sno')."""
        df4 = cls.df1.join(cls.df2.set_index('sno'), how='outer', on='sno')
        df5 = df4.join(cls.df3.set_index('sno'), how='outer', on='sno')
        df5.fillna(0, inplace=True)
        print(df5)
        # BUG FIX: the original assigned a local df5 only, leaving
        # cls.df5 as None so task_groupby crashed afterwards.
        cls.df5 = df5

    @classmethod
    def task_concat(cls):
        """Combine the three tables with pd.concat along columns, keyed by 'sno'."""
        dflist = [df.set_index('sno') for df in [cls.df1, cls.df2, cls.df3]]
        df5 = pd.concat(dflist, axis=1)
        df5 = df5.reset_index()
        # BUG FIX: 'df_fillna_with_value' is not a DataFrame method and
        # raised AttributeError; fillna is what was intended.
        df5.fillna(0, inplace=True)
        print(df5)
        cls.df5 = df5

    @classmethod
    def task_append(cls):
        """Combine by stacking each table to long form, chaining, then unstacking.

        Row-wise appending can only add rows, so each table is stacked to
        a (sno, column) -> value Series first, the Series are chained,
        and the result is unstacked back to wide form.
        """
        # Key every table by sno.
        df1 = cls.df1.set_index('sno')
        df2 = cls.df2.set_index('sno')
        df3 = cls.df3.set_index('sno')
        # BUG FIX: Series.append was removed in pandas 2.0; pd.concat is
        # the supported equivalent of chaining the stacked Series.
        df5 = pd.concat([df1.stack(), df2.stack(), df3.stack()])
        # Back to wide form, one row per sno.
        df5 = df5.unstack()
        df5 = df5.reset_index()
        # BUG FIX: 'df_fillna_with_value' is not a DataFrame method.
        df5.fillna(0, inplace=True)
        print(df5)
        cls.df5 = df5

    @classmethod
    def task_groupby(cls):
        """Group the cached combined table by class number and print each group."""
        if cls.df5 is None:
            # BUG FIX: df5 starts as None; build it before grouping
            # instead of crashing with AttributeError.
            cls.task_merge()
        g = cls.df5.groupby('classno')
        for gkey in g.groups:
            print(f"第{gkey}组数据集：")
            print(g.get_group(gkey))


if __name__ == '__main__':
    # Demo menu: uncomment exactly the example(s) you want to run.
    # dataframe_concat()
    datafrmae_update()
    # dataframe_groupby()
    # Task.task_merge()
    # Task.task_join()
    # Task.task_concat()
    # Task.task_append()
    # Task.task_groupby()
    # print(dataframe_split(df1, [0.2, 0.4, 0.4]))
