import gc
import os.path
from typing import Literal

from .common_func.file_open import FileOpen
from .common_func.print_logger import PrintLogger

# Flag guarding the missing-pandas warning so it is emitted at most once.
# NOTE(review): a module body executes only once per process, so this guard is
# effectively always False here; kept as-is in case other modules read it.
import_error_warned = False
try:
    import pandas as pd
except ModuleNotFoundError:
    # pandas is an optional dependency: warn instead of failing the import.
    # If it is missing, everything below that touches ``pd`` will raise.
    if not import_error_warned:
        PrintLogger.warning("UDataFrame is not valid, because require module:[pandas] is not installed. "
                            "If you want use UDataFrame, please install [pandas] first")
        import_error_warned = True

# Allowed values for the ``how`` argument of pandas-style merges (see UDfOp.merge_df).
MergeHow = Literal["left", "right", "inner", "outer", "cross"]


class TempRes:
    """Empty attribute container used by ``UDataFrame.list_res_2_property``,
    which sets one attribute per DataFrame column on each instance."""
    pass


def _deal_non_serialized_object(obj):
    # noinspection PyBroadException
    try:
        if isinstance(obj, set) or isinstance(obj, tuple):
            return list(obj)
        return str(obj)
    except Exception:
        return ""


class UDataFrame:
    """
    Base class that caches tabular data in a pandas DataFrame and provides
    simple query helpers on top of it.

    Subclasses must override ``_build_data_frame`` to load real data and
    should set ``column`` to the frame's column names, which drive the
    dict/property result conversions.
    """
    # Lazily-built DataFrame cache; populated by update() on first access.
    data_frame = None
    # Column names of the frame (class-level, shared; subclasses override).
    column = list()

    @classmethod
    def get_id_set(cls):
        """
        Collect all values of the "id" column.
        :return: set of ids; empty set when there is no data
        """
        # Fix: build the frame before inspecting it — previously this read
        # cls.data_frame while it could still be None, raising TypeError.
        cls.update()
        res = set()
        if len(cls.data_frame):
            for row in cls.get("id", res_type="list"):
                res.add(row[0])
        return res

    @classmethod
    def get(cls, fields=None, cond_field_name="", cond_values=None, res_type: str = "list"):
        """
        Base entry point for querying data.
        :param res_type: result type: "list", "dict" or "property"
        :param fields: field names to return; all columns when empty
        :param cond_field_name: filter field name; no filtering when empty
        :param cond_values: filter value or list of values; no filtering when empty
        :return: result converted by ``get_result`` (empty list when no data)
        """
        cls.update()
        if not len(cls.data_frame):
            return list()
        fields = cls._arg2list(fields)
        cond_values = cls._arg2list(cond_values)

        if fields:
            if cond_field_name and cond_values:
                res = cls.data_frame.loc[cls.data_frame[cond_field_name].isin(cond_values), fields]
            else:
                res = cls.data_frame.loc[:, fields]
        else:
            if cond_field_name and cond_values:
                # ".loc[mask, :]" replaces the deprecated ".loc[mask,]" form.
                res = cls.data_frame.loc[cls.data_frame[cond_field_name].isin(cond_values), :]
            else:
                res = cls.data_frame
        return cls.get_result(res, res_type=res_type)

    @classmethod
    def get_single(cls, _field, cond_field_name, cond_value):
        """
        Fetch a single value; when several rows match, only the first is returned.
        :param _field: field name whose value is returned
        :param cond_field_name: filter field name
        :param cond_value: filter field value (exact match)
        :return: the matched value, or "" when nothing matches
        """
        cls.update()
        res = cls.data_frame.loc[cls.data_frame[cond_field_name] == cond_value, [_field]]
        if not res.empty:
            return res.values.tolist()[0][0]
        return ""

    @classmethod
    def get_name_by_id(cls, _id):
        """Shortcut: return the "name" of the row whose "id" equals ``_id``."""
        return cls.get_single("name", "id", _id)

    @classmethod
    def get_by_id(cls, _ids, res_type: str = "list"):
        """Query rows whose "id" is in ``_ids`` (scalar or list)."""
        return cls.get(cond_field_name="id", cond_values=_ids, res_type=res_type)

    @classmethod
    def get_by_name(cls, _names, res_type: str = "list"):
        """Query rows whose "name" is in ``_names`` (scalar or list)."""
        return cls.get(cond_field_name="name", cond_values=_names, res_type=res_type)

    @classmethod
    def update(cls, force=False):
        """(Re)build the cached frame when absent or when ``force`` is True."""
        if cls.data_frame is None or force:
            cls._build_data_frame(True)

    @classmethod
    def _build_data_frame(cls, _force=False):
        """
        Build the DataFrame. This is a demo implementation; subclasses must
        override it to load real data (and should also set ``cls.column``).
        :param _force: rebuild even if data already exists (unused in demo)
        :return:
        """
        _column = ["id", "name", "status"]
        _data_list = [[1, "a", "1"], [2, "b", "0"]]

        cls.data_frame = pd.DataFrame(_data_list, columns=_column)

    @classmethod
    def get_result(cls, df_query_res, res_type: str = "list"):
        """
        Convert a DataFrame query result into the requested type:
        "list" (nested lists), "dict" (list of dicts keyed by ``cls.column``)
        or "property" (objects exposing each column as an attribute, so a
        column named A is reachable as ``res.A``).
        :param df_query_res: DataFrame slice to convert
        :param res_type: "list", "dict" or "property"
        :return: converted result; empty list for an empty frame
        """
        if df_query_res.empty:
            return list()
        res = df_query_res.values.tolist()
        if res_type == "property":
            return cls.list_res_2_property(res)
        elif res_type == "dict":
            return cls.list_res_2_dict(res)
        else:
            return res

    @staticmethod
    def _arg2list(arg):
        """
        Normalize an argument to a sequence: None -> [], list/tuple kept
        as-is, anything else wrapped in a one-element list.
        :param arg: value to normalize
        :return: list or tuple
        """
        if arg is None:
            return list()
        if isinstance(arg, (tuple, list)):
            return arg
        return [arg]

    @classmethod
    def list_res_2_dict(cls, res_list):
        """Map each result row to a dict keyed by ``cls.column``."""
        return [dict(zip(cls.column, res)) for res in res_list]

    @classmethod
    def list_res_2_property(cls, res_list):
        """Map each result row to a TempRes object with one attribute per column."""
        new_res = list()
        for res in res_list:
            temp_res = TempRes()
            for name, value in zip(cls.column, res):
                setattr(temp_res, name, value)
            new_res.append(temp_res)
        return new_res


class UDfOp:
    """
    Stateless helpers for pandas DataFrames: memory optimization,
    (de)serialization to pickle/csv/json, and basic merge/concat/filter ops.
    """
    # Debug switches: when _debug is truthy, saved frames are mirrored to CSV
    # (optionally limited to the last _record_limit rows) for inspection.
    _debug = 0
    _record_limit = 0

    @classmethod
    def enable_debug(cls, record_limit=0):
        """Turn on CSV mirroring of saved frames; 0 means no row limit."""
        cls._debug = 1
        cls._record_limit = record_limit

    @classmethod
    def disable_debug(cls):
        """Turn off CSV mirroring of saved frames."""
        cls._debug = 0
        cls._record_limit = 0

    @staticmethod
    def optimize_df_mem(df):
        """
        Reduce a DataFrame's memory footprint: downcast integer columns to
        unsigned, float columns to float32 where lossless, and convert
        low-cardinality object columns (unique ratio < 0.5) to category dtype.
        :param df: source DataFrame
        :return: optimized copy, or the input unchanged when it is not a DataFrame
        """
        if not isinstance(df, pd.DataFrame):
            PrintLogger.warning("入参不是pandas数据框，无法优化，直接返回")
            return df
        df_int = df.select_dtypes(include=["int"])
        df_float = df.select_dtypes(include=["float"])
        df_object = df.select_dtypes(include=["object"])
        other_columns = list(set(df.columns) - set(df_int.columns) - set(df_float.columns) - set(df_object.columns))
        df_other = df.loc[:, other_columns]

        convert_int = df_int.apply(pd.to_numeric, downcast='unsigned')
        convert_float = df_float.apply(pd.to_numeric, downcast='float')

        convert_object = pd.DataFrame()
        for col in df_object.columns:
            num_unique = len(df_object[col].unique())
            num_total = len(df_object[col])
            # Fix: assign new columns directly instead of via ".loc[:, col]";
            # loc-assignment of a new column on an empty frame is fragile
            # across pandas versions (the empty index can drop all rows).
            if num_total and num_unique / num_total < 0.5:
                convert_object[col] = df_object[col].astype("category")
            else:
                convert_object[col] = df_object[col]
        res_df = pd.concat([convert_int, convert_float, convert_object, df_other], axis=1)

        # Drop intermediate references eagerly and force a collection: this
        # helper is meant for large frames where peak memory matters.
        del df
        del df_object
        del df_other
        del df_float
        del df_int
        del convert_int
        del convert_float
        del convert_object
        gc.collect()
        return res_df

    @classmethod
    def save_df_to_dir(cls, df, dir_path: str, file_base_name: str, zipped: bool = True):
        """
        Save a DataFrame under ``dir_path`` as ``<file_base_name>.pkl``
        (delegates to ``save_df_to_file``).
        :param df: DataFrame to save; non-DataFrames are rejected with a warning
        :param dir_path: target directory
        :param file_base_name: file name without extension
        :param zipped: compress the pickle with zip
        :return:
        """
        if not isinstance(df, pd.DataFrame):
            PrintLogger.warning("入参不是pandas数据框，无法保存，直接返回")
            return
        _file = os.path.join(dir_path, file_base_name + ".pkl")
        cls.save_df_to_file(df, _file, zipped)

    @classmethod
    def save_df_to_file(cls, df, _file: str, zipped: bool = True):
        """
        Save a DataFrame to ``_file`` as an AES-encrypted pickle; when debug
        mode is on, also mirror it to a CSV for manual inspection.
        :param df: DataFrame to save; non-DataFrames are rejected with a warning
        :param _file: target .pkl path
        :param zipped: compress the pickle with zip
        :return:
        """
        if not isinstance(df, pd.DataFrame):
            PrintLogger.warning("入参不是pandas数据框，无法保存，直接返回")
            return
        with FileOpen(_file, "wb", encrypt_mode="aes") as f:
            if zipped:
                df.to_pickle(f, compression="zip")
            else:
                df.to_pickle(f)

        # The CSV below exists only so the data can be eyeballed while
        # debugging; in production set the data-debug config to 0
        # (data_debug: 0) so it is skipped.
        if not cls._debug:
            return
        if cls._record_limit and len(df) > cls._record_limit:
            df.tail(cls._record_limit).to_csv(_file.replace(".pkl", ".csv"), index=False)
        else:
            df.to_csv(_file.replace(".pkl", ".csv"), index=False)

    @classmethod
    def load_df_from_pickle(cls, dir_path: str, file_base_name: str, default_columns: list = None, zipped: bool = True):
        """
        Load a DataFrame from ``<dir_path>/<file_base_name>.pkl``; if the file
        does not exist, return an empty frame with ``default_columns``.
        :param dir_path: source directory
        :param file_base_name: file name without extension
        :param default_columns: columns of the fallback empty frame
        :param zipped: the pickle was saved zip-compressed
        :return: loaded DataFrame (possibly empty)
        """
        _file = os.path.join(dir_path, file_base_name + ".pkl")
        return cls.load_pickle_file(_file, default_columns, zipped)

    @classmethod
    def load_pickle_file(cls, _file: str, default_columns: list = None, zipped: bool = True):
        """
        Load a DataFrame from a .pkl file; if the file does not exist,
        return an empty frame with ``default_columns``.
        :param _file: source .pkl path
        :param default_columns: columns of the fallback empty frame
        :param zipped: the pickle was saved zip-compressed
        :return: loaded DataFrame (possibly empty)
        """
        if os.path.exists(_file):
            with FileOpen(_file) as file_io:
                if zipped:
                    return pd.read_pickle(file_io, compression="zip")
                else:
                    return pd.read_pickle(file_io)
        else:
            return pd.DataFrame(columns=default_columns)

    @classmethod
    def load_csv(cls, _file: str, default_columns: list = None):
        """
        Load a DataFrame from a CSV file; if the file does not exist,
        return an empty frame with ``default_columns``.
        :param _file: source .csv path
        :param default_columns: columns of the fallback empty frame
        :return: loaded DataFrame (possibly empty)
        """
        if os.path.exists(_file):
            with FileOpen(_file) as file_io:
                return pd.read_csv(file_io)
        else:
            return pd.DataFrame(columns=default_columns)

    @classmethod
    def pkl_to_csv(cls, pkl_file, encrypt=False):
        """
        Convert a pickled DataFrame to a sibling CSV file.
        :param pkl_file: source .pkl path
        :param encrypt: write the CSV AES-encrypted via FileOpen
        :return:
        """
        _df = cls.load_pickle_file(pkl_file)
        if not len(_df):
            PrintLogger.warning("[UDfOp.pkl_to_csv]::No Data in pkl file")
        else:
            _file = pkl_file.replace(".pkl", ".csv")
            if encrypt:
                with FileOpen(_file, "wb", encrypt_mode="aes") as f:
                    _df.to_csv(f, index=False)
            else:
                _df.to_csv(_file, index=False)

    @classmethod
    def pkl_to_json(cls, pkl_file, part_size=100, orient="records", indent=4, force_ascii=False,
                    double_precision=10, encrypt=False):
        """
        Convert a pickled DataFrame to one or more sibling JSON files,
        split into parts so no single file becomes too large to open.

        :param pkl_file: source .pkl path
        :param part_size: max rows per output part (default 100)
        :param orient: 'split': index/columns/data sections;
                       'records': one JSON object per row;
                       'index': index as keys, rows as nested objects;
                       'columns': column names as keys, nested objects;
                       'values': values list only;
                       'table': JSON table schema.
                       Default: 'records'
        :param indent: indentation level (default 4)
        :param force_ascii: encode non-ASCII characters as \\u escapes (default False)
        :param double_precision: float precision (default 10)
        :param encrypt: write the JSON AES-encrypted via FileOpen
        :raises ValueError: if part_size is not a positive integer
        :return:
        """
        # Fix: a non-positive part_size used to loop forever; fail fast instead.
        if part_size <= 0:
            raise ValueError(f"part_size must be a positive integer, got {part_size}")
        _df: pd.DataFrame = cls.load_pickle_file(pkl_file)
        if not len(_df):
            # Fix: log tag previously said pkl_to_csv (copy-paste error).
            PrintLogger.warning("[UDfOp.pkl_to_json]::No Data in pkl file")
        else:
            for part, start_line_no in enumerate(range(0, len(_df), part_size)):
                part_df: pd.DataFrame = _df.iloc[start_line_no:start_line_no + part_size]
                _file = pkl_file.replace(".pkl", f"_part{part}.json")
                if encrypt:
                    with FileOpen(_file, "wt", encrypt_mode="aes") as f:
                        part_df.to_json(f, orient=orient, indent=indent, force_ascii=force_ascii,
                                        double_precision=double_precision, default_handler=_deal_non_serialized_object)
                else:
                    part_df.to_json(_file, orient=orient, indent=indent, force_ascii=force_ascii,
                                    double_precision=double_precision, default_handler=_deal_non_serialized_object)

    @staticmethod
    def trans_dict_list_to_df(obj: list):
        """Build a DataFrame from a list of dicts/records (raises on bad input;
        see ``from_records`` for the non-raising variant)."""
        return pd.DataFrame.from_records(obj)

    @staticmethod
    def from_records(obj: list):
        """Build a DataFrame from records; on failure, log the error and
        return an empty DataFrame instead of raising."""
        try:
            return pd.DataFrame.from_records(obj)
        except Exception as e:
            PrintLogger.error(f"[UDfOp.from_records]::Error msg:{e}, line_no:{e.__traceback__.tb_lineno}")
            return pd.DataFrame()

    @staticmethod
    def merge_df(df_list: list, on, how: "MergeHow" = "outer"):
        """
        Column-wise merge of several DataFrames on a shared key.
        Invalid or empty frames in the list are skipped; the last remaining
        frame is used as the merge origin.
        :param df_list: list of DataFrames
        :param on: column name(s) to join on
        :param how: Literal["left", "right", "inner", "outer", "cross"] = "outer"
        :return: merged DataFrame (empty when nothing valid to merge)
        """
        if not isinstance(df_list, list):
            return pd.DataFrame()
        new_df_list = [_df for _df in df_list if isinstance(_df, pd.DataFrame) and len(_df)]

        if not new_df_list:
            return pd.DataFrame()
        origin_df = new_df_list.pop()
        for _df in new_df_list:
            origin_df = pd.merge(origin_df, _df, on=on, how=how, validate="many_to_many")
        return origin_df

    @staticmethod
    def concat_df(df_list: list):
        """
        Row-wise concatenation of several DataFrames; invalid or empty
        frames are skipped.
        :param df_list: list of DataFrames
        :return: concatenated DataFrame (empty when nothing valid to concat;
                 the single input itself when only one valid frame remains)
        """
        if not isinstance(df_list, list):
            return pd.DataFrame()
        new_df_list = [_df for _df in df_list if isinstance(_df, pd.DataFrame) and len(_df)]
        if not new_df_list:
            return pd.DataFrame()
        if len(new_df_list) == 1:
            return new_df_list[0]
        else:
            return pd.concat(new_df_list, axis=0)

    @staticmethod
    def filter_df(a, b, on):
        """
        Keep only the rows of ``a`` whose key(s) ``on`` do NOT appear in ``b``
        (anti-join via an outer merge with an indicator column).
        :param a: DataFrame to filter
        :param b: DataFrame providing the keys to exclude
        :param on: column name(s) forming the key
        :return: filtered DataFrame; empty when ``a`` is invalid/empty,
                 ``a`` unchanged when ``b`` is invalid/empty
        """
        if not isinstance(a, pd.DataFrame) or not len(a):
            return pd.DataFrame()
        if not isinstance(b, pd.DataFrame) or not len(b):
            return a
        merged = pd.merge(a, b, on=on, how="outer", validate="many_to_many", indicator=True)
        static_df = merged[merged["_merge"] == "left_only"]
        origin_columns = list(static_df.columns)
        origin_columns.remove("_merge")
        res_df = static_df.loc[:, origin_columns]
        del static_df
        del merged
        return res_df
