from abc import ABCMeta, abstractmethod

from sam import import_dir_path
from sam.util.beanUtil import exclude_dict_none_properties
from sam.util.dbUtil import DB, DbConnectionConfig, DEFAULT_MYSQL_CONNECTION_CONFIG
from sam.util.excelUtil import handle_by_sheet
from sam.util.fileUtil import get_file_path_list_by_dir_path
from sam.util.fileUtil2 import read_file, check_file


class File2Db(DB, metaclass=ABCMeta):
    """
    Import file contents into a database table.

    Targets (translated from the original Chinese docstring):
    1. Read content from a directory or a single file and update the database.
    2. Support reading excel and csv files.
    3. Support multiple update modes, e.g. insert-or-update, batch insert,
       batch update.

    Subclasses implement the two ``transfer_*`` hooks that map one raw file
    row (a list of cells, already stringified) to a dict keyed by database
    column name.
    """

    def __init__(self, table_name: str, import_table_column_list: list = None,
                 config: DbConnectionConfig = DEFAULT_MYSQL_CONNECTION_CONFIG):
        """
        :param table_name: target table to write into
        :param import_table_column_list: expected header columns of the files;
            when set, file content is validated against it and the first
            (header) line is always skipped
        :param config: database connection configuration
        """
        super().__init__(config=config)
        self.table_name = table_name
        self.import_table_column_list = import_table_column_list
        self.max_random_count = 10
        # counters of rows accepted for import, keyed by file path / sheet name
        self.file_read_stats = {"total": 0}

    @abstractmethod
    def transfer_list_2_dict(self, line: list) -> dict:
        """Map one file row (list of stringified cells) to a column dict."""
        raise NotImplementedError

    def assert_read_file_content_is_suitable(self, file_path) -> bool:
        """Return True when the file header matches ``import_table_column_list``."""
        return check_file(file_path, self.import_table_column_list)

    @abstractmethod
    def transfer_list_2_dict_by_sheet(self, sheet_name: str, line: list, sheet_column_list: list,
                                      row_index: int) -> dict:
        """Map one sheet row to a column dict; return a falsy value to skip the row."""
        raise NotImplementedError

    def _collect_items(self, lines, transform, is_filter_none_properties: bool) -> list:
        """
        Shared row preprocessing for the single-file and per-sheet paths:
        stringify each row, run ``transform(row_index, row)`` and keep every
        truthy dict result.

        Rows whose first cell is falsy (blank/padding rows) are skipped.
        NOTE: requiring a truthy dict also makes the single-file path
        consistent with the per-sheet path, which already skipped empty dicts.
        """
        item_list = []
        for index, line in enumerate(lines):
            if not line[0]:
                continue
            item = transform(index, [str(cell) for cell in line])
            if item and isinstance(item, dict):
                if is_filter_none_properties:
                    # drop None-valued properties in place
                    exclude_dict_none_properties(item)
                item_list.append(item)
        return item_list

    def batch_update_by_sheet(self, sheet_name: str, sheet_content: list, outer_params: dict):
        """
        Convert one excel sheet into dicts and write them to the database.

        Invoked once per sheet by ``handle_by_sheet``; ``outer_params`` carries
        the keyword arguments packed in ``_batch_update_by_sheet``.
        """
        batch_size = outer_params.get("batch_size")
        optional = outer_params.get("optional")
        is_only_print_sql = outer_params.get("is_only_print_sql")
        exclude_properties = outer_params.get("exclude_properties")
        start = outer_params.get("start")
        end = outer_params.get("end")
        condition = outer_params.get("condition")
        start_end_tuple_list = outer_params.get("start_end_tuple_list")
        is_filter_none_properties = outer_params.get("is_filter_none_properties")
        is_skip_first_line = outer_params.get("is_skip_first_line")
        update_key_prefix = outer_params.get("update_key_prefix")

        # BUGFIX: the original popped the header row off the caller's list,
        # mutating ``sheet_content`` in place; slicing leaves it untouched.
        # The first row is always used as the column list; it is additionally
        # processed as a data row when is_skip_first_line is falsy (unchanged).
        sheet_column = list(sheet_content[0]) if sheet_content else []
        data_rows = sheet_content[1:] if is_skip_first_line else sheet_content

        item_list = self._collect_items(
            data_rows,
            lambda index, line: self.transfer_list_2_dict_by_sheet(
                sheet_name, line, sheet_column_list=sheet_column, row_index=index),
            is_filter_none_properties)
        self.file_read_stats[sheet_name] = len(item_list)
        self.file_read_stats["total"] += len(item_list)
        self.batch_update_by_item_list(item_list, self.table_name, batch_size=batch_size, optional=optional,
                                       is_only_print_sql=is_only_print_sql, exclude_properties=exclude_properties,
                                       start=start, end=end, condition=condition,
                                       update_key_prefix=update_key_prefix,
                                       start_end_tuple_list=start_end_tuple_list)

    def _batch_update_by_sheet(self, read_file_path: str,
                               batch_size: int = 100,
                               optional: str = "BatchInsert",
                               is_only_print_sql: bool = True,
                               exclude_properties: list = None,
                               start: int = None,
                               end: int = None,
                               condition: dict = None,
                               start_end_tuple_list: list = None,
                               is_filter_none_properties: bool = False,
                               is_skip_first_line: bool = True,
                               update_key_prefix: str = None):
        """Walk every sheet of an excel file through ``batch_update_by_sheet``."""
        outer_params = {
            "batch_size": batch_size,
            "optional": optional,
            "is_only_print_sql": is_only_print_sql,
            "exclude_properties": exclude_properties,
            "start": start,
            "end": end,
            "condition": condition,
            "start_end_tuple_list": start_end_tuple_list,
            "is_filter_none_properties": is_filter_none_properties,
            "is_skip_first_line": is_skip_first_line,
            "update_key_prefix": update_key_prefix,
        }
        handle_by_sheet(read_file_path, self.batch_update_by_sheet, outer_params)

    def batch_update_by_file(self,
                             read_file_path: str,
                             batch_size=100,
                             optional: str = None,
                             is_only_print_sql: bool = False,
                             exclude_properties: list = None,
                             start: int = None,
                             end: int = None,
                             condition: dict = None,
                             start_end_tuple_list: list = None,
                             is_filter_none_properties: bool = False,
                             is_skip_first_line: bool = False,
                             update_key_prefix: str = None,
                             is_only_handle_first_sheet: bool = True
                             ):
        """
        Batch-update the database from a single file.

        When ``is_only_handle_first_sheet`` is True the file is validated
        against ``import_table_column_list`` and read as one flat table;
        otherwise every sheet of an excel file is processed.

        :raises RuntimeError: when the file header does not match
            ``import_table_column_list``
        :raises RuntimeWarning: when multi-sheet handling is requested for a
            non-excel file
        """
        self.logger.info(f"开始 读取文件 {read_file_path} 的 内容")
        # A configured header column list implies the file carries a header,
        # so the first line must always be skipped.
        is_skip_first_line = True if self.import_table_column_list else is_skip_first_line
        if is_only_handle_first_sheet:
            if not self.assert_read_file_content_is_suitable(read_file_path):
                msg = f"文件: {read_file_path} 读取到的内容与程序处理能力不匹配"
                raise RuntimeError(msg)
            lines = read_file(read_file_path, is_skip_first_line=is_skip_first_line)
            self.logger.info(f"文件 {read_file_path} 读取 完成 , 将开始对文件进行 预处理 ...")
            item_list = self._collect_items(
                lines,
                lambda _index, line: self.transfer_list_2_dict(line),
                is_filter_none_properties)
            self.file_read_stats[read_file_path] = len(item_list)
            self.file_read_stats["total"] += len(item_list)
            self.logger.info(f"对 文件 {read_file_path} 预处理完成, 符合条件的有 {len(item_list)} 条数据, 将要对文件进行更新至数据库操作... ")
            self.batch_update_by_item_list(item_list, self.table_name, batch_size=batch_size, optional=optional,
                                           is_only_print_sql=is_only_print_sql, exclude_properties=exclude_properties,
                                           start=start, end=end, condition=condition,
                                           update_key_prefix=update_key_prefix,
                                           start_end_tuple_list=start_end_tuple_list)
        else:
            if read_file_path.endswith((".xls", ".xlsx")):
                # BUGFIX: is_filter_none_properties and is_skip_first_line were
                # previously dropped on this path, so callers' explicit values
                # were silently ignored in favor of _batch_update_by_sheet's
                # defaults.
                self._batch_update_by_sheet(read_file_path, batch_size=batch_size, optional=optional,
                                            is_only_print_sql=is_only_print_sql, exclude_properties=exclude_properties,
                                            start=start, end=end, condition=condition,
                                            is_filter_none_properties=is_filter_none_properties,
                                            is_skip_first_line=is_skip_first_line,
                                            update_key_prefix=update_key_prefix,
                                            start_end_tuple_list=start_end_tuple_list)
            else:
                raise RuntimeWarning(f" 只有 excel 格式的文件 才能进行 单个sheet操作; 当前文件路径是: {read_file_path} ")

        self.logger.info(f" 完成 {read_file_path}")

    def batch_update_by_dir(self,
                            read_dir_path: str = import_dir_path,
                            start: int = None,
                            end: int = None,
                            batch_size=100,
                            optional: str = "BatchInsert",
                            is_only_print_sql: bool = True,
                            condition: dict = None,
                            exclude_properties: list = None,
                            is_filter_none_properties: bool = False,
                            is_skip_first_line: bool = False,
                            update_key_prefix: str = None,
                            is_only_handle_first_sheet: bool = True
                            ):
        """
        Batch-update the database from every file in a directory.

        :param start: index of the first file to process (inclusive, default 0)
        :param end: index of the last file to process (inclusive, defaults to
            the last file)
        :raises RuntimeError: for an unsupported ``optional`` or an invalid
            ``batch_size``
        :raises RuntimeWarning: when the directory contains no files
        """
        if optional not in ("BatchInsert", "InsertOrUpdate", "BatchUpdate"):
            error_msg = f"参数错误: 当前操作: {optional} 不支持"
            raise RuntimeError(error_msg)
        if not batch_size or batch_size < 0:
            error_msg = f"参数错误: batch_size : 必须大于0, 当前值为 {batch_size}"
            raise RuntimeError(error_msg)

        read_file_path_list = get_file_path_list_by_dir_path(read_dir_path)
        if not read_file_path_list:
            raise RuntimeWarning("该文件夹是空文件夹")

        max_size = len(read_file_path_list)
        self.logger.info(f"当前文件夹 {read_dir_path} 共有 {max_size} 个文件")
        # Normalize the inclusive [start, end] window. This is equivalent to
        # the original `not end` dance: end=None -> last file, end=0 -> only
        # file 0, anything larger than max_size is clamped.
        start = max(start or 0, 0)
        end = max_size if end is None else min(end, max_size)
        self.logger.info(f"本次处理 第 {start} 个 至 第 {end} 个 文件")

        for i in range(start, min(end + 1, max_size)):
            self.batch_update_by_file(
                read_file_path_list[i],
                batch_size=batch_size,
                optional=optional,
                is_only_print_sql=is_only_print_sql,
                exclude_properties=exclude_properties,
                condition=condition,
                is_filter_none_properties=is_filter_none_properties,
                is_skip_first_line=is_skip_first_line,
                is_only_handle_first_sheet=is_only_handle_first_sheet,
                update_key_prefix=update_key_prefix,
            )

    def __del__(self):
        # Guard against a partially-initialized instance (e.g. __init__ raised
        # before attributes were set) so __del__ never raises itself.
        logger = getattr(self, "logger", None)
        if logger is not None:
            stats = getattr(self, "file_read_stats", None)
            db_stats = getattr(self, "operate_record_stats", None)
            logger.info(f"读取文件的统计是: {stats} \n 写数据库的统计是: {db_stats}")
