# File-handling utilities (文件处理工具)
import os
import json
import yaml
from configparser import ConfigParser
from bs4 import BeautifulSoup
from .type_ import str_to, to_str


def _replace_ignore_case(origin, _old, _new):
    """
    Replace every case-insensitive occurrence of ``_old`` in ``origin`` with ``_new``.

    TODO: use a faster algorithm, e.g. KMP or a KMP variant.
    :param origin: original string
    :param _old: substring to replace (matched ignoring case)
    :param _new: replacement string
    :return: the string with all case-insensitive matches replaced
    """
    # Guard: an empty pattern would match at every position and loop forever.
    if not _old:
        return origin
    lower_old = _old.lower()
    len_content = len(origin)
    len_old = len(_old)
    # Collect pieces and join once instead of quadratic string concatenation.
    parts = []
    i = 0
    # BUG FIX: the original loop condition (i < len_content - len_old) skipped
    # a match ending exactly at the end of the string and silently dropped the
    # final len_old characters of the input.
    while i <= len_content - len_old:
        if origin[i:i + len_old].lower() == lower_old:
            parts.append(_new)
            i += len_old
        else:
            parts.append(origin[i])
            i += 1
    # Append the tail that is too short to contain another match.
    parts.append(origin[i:])
    return ''.join(parts)


def replace_content(filename: str,
                    new_filename: str,
                    _old: str,
                    _new: str,
                    _count=-1,
                    encoding='utf-8',
                    ignore_case=False):
    """
    Replace text in a file and write the result to a new file.

    :param filename: source file name
    :param new_filename: destination file name
    :param _old: substring to be replaced
    :param _new: replacement string
    :param _count: maximum number of replacements, -1 means unlimited.
        NOTE: only honored when ignore_case is False; the case-insensitive
        path currently replaces every occurrence.
    :param encoding: text encoding used for both files
    :param ignore_case: whether matching ignores case
    :return: None
    """
    with open(filename, 'r', encoding=encoding) as f:
        content = f.read()
    if ignore_case:
        # _count is not applied on this path (see docstring).
        content = _replace_ignore_case(content, _old, _new)
    else:
        content = content.replace(_old, _new, _count)
    with open(new_filename, 'w', encoding=encoding) as f:
        f.write(content)


def replace_dirs(dir1: str,
                 dir2: str,
                 _old: str,
                 _new: str,
                 _count=-1,
                 encoding='utf-8',
                 ignore_case=False,
                 include_suffix=('*', ),
                 exclude_suffix=tuple(),
                 replace_func=replace_content,
                 filter_funcs=None):
    """
    Replace text in every file under ``dir1`` and mirror the results into ``dir2``.

    :param dir1: source directory
    :param dir2: destination directory
    :param _old: substring to be replaced
    :param _new: replacement string
    :param _count: maximum number of replacements, -1 means unlimited
    :param encoding: text encoding
    :param ignore_case: whether matching ignores case
    :param include_suffix: file suffixes to include ('*' means all)
    :param exclude_suffix: file suffixes to exclude
    :param replace_func: per-file replacement function
    :param filter_funcs: optional extra file-filter callables
    :return: None
    """
    # Delegate to the recursive worker; deep=0 marks the top-level call.
    return _replace_dirs(dir1, dir2, _old, _new,
                         _count=_count,
                         encoding=encoding,
                         ignore_case=ignore_case,
                         include_suffix=include_suffix,
                         exclude_suffix=exclude_suffix,
                         replace_func=replace_func,
                         filter_funcs=filter_funcs,
                         deep=0)


def _replace_dirs(dir1: str,
                  dir2: str,
                  _old: str,
                  _new: str,
                  _count=-1,
                  encoding='utf-8',
                  ignore_case=False,
                  include_suffix=('*', ),
                  exclude_suffix=tuple(),
                  replace_func=replace_content,
                  filter_funcs=None,
                  deep=0):
    """
    Recursively replace text in every file under ``dir1``, mirroring the
    directory tree into ``dir2``.

    :param dir1: source directory
    :param dir2: destination directory (created if missing)
    :param _old: substring to be replaced
    :param _new: replacement string
    :param _count: maximum number of replacements, -1 means unlimited
    :param encoding: text encoding
    :param ignore_case: whether matching ignores case
    :param include_suffix: file suffixes to include ('*' means all)
    :param exclude_suffix: file suffixes to exclude
    :param replace_func: per-file replacement function
    :param filter_funcs: extra filter callables; all must return truthy for a
        file to be processed. NOTE(review): they are invoked with no
        arguments — confirm the expected filter signature.
    :param deep: recursion depth; 0 marks the top-level call, which raises an
        aggregated error instead of returning the failure list
    :return: list of failure messages when deep > 0, otherwise None
    :raises Exception: at the top level, if any file could not be decoded
    """
    if not os.path.exists(dir2):
        os.makedirs(dir2)
    failures = []
    for file_name in os.listdir(dir1):
        cur_path = dir1 + os.sep + file_name
        out_path = dir2 + os.sep + file_name
        if os.path.isdir(cur_path):
            # Recurse into sub-directories and collect their failure messages.
            failures += _replace_dirs(cur_path, out_path, _old, _new, _count, encoding, ignore_case, include_suffix, exclude_suffix, replace_func, filter_funcs, deep + 1)
        else:
            suffix = file_name.split('.')[-1]
            # Included when: wildcard requested and suffix not excluded, OR the
            # suffix is explicitly included ('and' binds tighter than 'or').
            if '*' in include_suffix and suffix not in exclude_suffix or suffix in include_suffix:
                status = True
                if filter_funcs:
                    for filter_func in filter_funcs:
                        # NOTE(review): &= is a bitwise AND of the returned
                        # values — relies on filters returning booleans.
                        status &= filter_func()
                if status:
                    try:
                        replace_func(cur_path, out_path, _old, _new, _count, encoding, ignore_case)
                    except UnicodeDecodeError:
                        # Undecodable file: record and continue (message says
                        # "file ... cannot be opened with <encoding>, skipped").
                        failures.append(f'文件{cur_path}无法使用{encoding}编码打开, 忽略该文件')
    if deep:
        # Nested call: hand the failures up to the caller.
        return failures
    if failures:
        # Top-level call: surface all accumulated failures at once.
        raise Exception('\n'.join(failures))


def load_file_data(filename: str,
                   encoding='utf-8',
                   header=False):
    """
    Load structured data from a file, dispatching on its extension.

    :param filename: file name
    :param encoding: text encoding
    :param header: for csv-like files, whether the first row is a header
    :return: parsed data (type depends on the file format)
    :raises FileNotFoundError: if the file does not exist
    :raises Exception: for unsupported file types
    """
    if not os.path.isfile(filename):
        raise FileNotFoundError(filename)
    # (suffixes, lazily-invoked loader) pairs, checked in order.
    dispatch = (
        (('.ini', '.cfg'), lambda: _load_ini_data(filename)),
        (('.yaml', '.yml'), lambda: _load_yaml_data(filename, encoding)),
        (('.json',), lambda: _load_json_data(filename, encoding)),
        (('.xml', '.lxml'), lambda: _load_xml_html_data(filename, 'lxml', encoding)),
        (('.html', '.htm'), lambda: _load_xml_html_data(filename, 'html.parser', encoding)),
        (('.properties',), lambda: _load_properties_data(filename, encoding)),
        (('.csv',), lambda: _load_csv_data(filename, header, encoding)),
    )
    for suffixes, loader in dispatch:
        if filename.endswith(suffixes):
            return loader()
    raise Exception('不支持的文件类型')


def _load_yaml_data(filename: str, encoding='utf-8'):
    """Parse a YAML file with the safe loader and return the resulting object."""
    with open(filename, 'r', encoding=encoding) as stream:
        text = stream.read()
    return yaml.load(text, yaml.SafeLoader)


def _load_json_data(filename: str, encoding='utf-8'):
    """Parse a JSON file and return the decoded object."""
    with open(filename, 'r', encoding=encoding) as stream:
        return json.load(stream)


def _load_ini_data(filename: str):
    """Read an ini/cfg file into a nested dict: {section: {option: value}}."""
    parser = ConfigParser()
    parser.read(filename)
    return {
        section: {option: parser.get(section, option)
                  for option in parser.options(section)}
        for section in parser.sections()
    }


def _load_xml_html_data(filename: str, feature, encoding='utf-8'):
    """Parse an XML/HTML file into a BeautifulSoup tree using ``feature`` as the parser."""
    with open(filename, 'r', encoding=encoding) as stream:
        markup = stream.read()
    return BeautifulSoup(markup, feature)


def _load_properties_data(filename, encoding='utf-8'):
    """
    Load a .properties file into a dict.

    Each ``key=value`` line is stored under its flat key; dotted keys
    (``a.b.c``) are additionally expanded into nested dicts. Values are
    converted via ``str_to``. Lines without '=' are ignored.

    :param filename: file name
    :param encoding: text encoding
    :return: dict of properties
    """
    def dfs_properties_key(p, keys, value):
        # Walk/create nested dicts along the dotted key path and set the leaf.
        if len(keys) == 1:
            p[keys[0]] = value
            return
        if keys[0] not in p:
            p[keys[0]] = {}
        dfs_properties_key(p[keys[0]], keys[1:], value)

    properties = {}
    with open(filename, 'r', encoding=encoding) as f:
        for line in f:
            # BUG FIX: the original used str.index, which raises ValueError on
            # any line without '=' (including blank lines); str.find returns -1
            # instead, which is what the `i >= 0` guard below expects.
            i = line.find('=')
            if i >= 0:
                key, value = line[:i].strip(), str_to(line[i + 1:].strip())
                properties[key] = value
                keys = key.split(".")
                if len(keys) >= 2:
                    dfs_properties_key(properties, keys, value)

    return properties


def _load_csv_data(filename: str, header=False, encoding='utf-8'):
    """
    Load a csv file.

    :param filename: file name
    :param header: whether the first row is a header
    :param encoding: text encoding
    :return: list of rows (dicts when header is True, lists otherwise)
    """
    # Delegate to the generic separator-based loader with a comma delimiter.
    return _load_sep_data(filename, sep=',', header=header, encoding=encoding)


def _load_sep_data(filename: str, sep: str, header=False, encoding='utf-8', checked=True):
    """
    Load a delimiter-separated text file.

    :param filename: file name
    :param sep: field delimiter
    :param header: whether the first row is a header
    :param encoding: text encoding
    :param checked: when header is True and a row's column count differs from
        the header's: raise if True, silently skip the row if False
    :return: list of dict rows when header is True, otherwise list of lists
    """
    rows = []
    with open(filename, 'r', encoding=encoding) as f:
        # Consume the header line up front so the loop only sees data rows.
        header_keys = f.readline().strip().split(sep) if header else None
        for raw in f:
            stripped = raw.strip()
            if not stripped:
                # Skip blank lines entirely.
                continue
            values = stripped.split(sep)
            if header_keys is None:
                rows.append(values)
                continue
            if len(values) != len(header_keys):
                if checked:
                    raise Exception('存在列数据与表头长度不一致')
                continue
            rows.append(dict(zip(header_keys, values)))
    return rows


def write_data(filename: str, data, header=False, encoding='utf-8'):
    """
    Write data to a file, dispatching on its extension (csv/yaml/properties/json).

    :param filename: destination file name
    :param data: data to write
    :param header: for csv files, whether to write a header row
    :param encoding: text encoding
    :raises Exception: if the file name is empty or the type is unsupported
    """
    if not filename:
        # BUG FIX: the original `return Exception(...)` created an exception
        # object and returned it instead of raising, so callers saw a silent
        # no-op; raise it so the error is actually reported.
        raise Exception('文件名称不能为空')
    suffix = filename.split('.')[-1]
    if suffix == 'csv':
        _write_csv_data(filename, data, header, encoding=encoding)
    elif suffix in ('yaml', 'yml'):
        _write_yaml_data(filename, data, encoding)
    elif suffix == 'properties':
        _write_properties_data(filename, data, encoding)
    elif suffix == 'json':
        _write_json_data(filename, data, encoding)
    else:
        raise Exception('不支持的文件类型')


def _write_json_data(filename: str, data, encoding='utf-8'):
    """Serialize ``data`` as one line of non-ASCII-preserving JSON plus a trailing newline."""
    text = json.dumps(data, ensure_ascii=False)
    with open(filename, 'w', encoding=encoding) as out:
        out.write(text)
        out.write('\n')


def _write_yaml_data(filename: str, data, encoding='utf-8'):
    """
    Dump ``data`` as YAML to ``filename``.

    :param filename: destination file name
    :param data: data to dump (keys kept in insertion order, unicode preserved)
    :param encoding: text encoding used to open the file
    """
    with open(filename, 'w', encoding=encoding) as f:
        # BUG FIX: the file is opened in text mode, but passing encoding= to
        # yaml.dump makes PyYAML encode its output to bytes before writing,
        # which a text stream rejects with TypeError. Let the text-mode file
        # object handle the encoding instead.
        yaml.dump(data, f, allow_unicode=True, sort_keys=False)


def _write_csv_data(filename: str, data, header=False, encoding='utf-8'):
    """
    Write list/dict data as comma-separated lines.

    Accepts a single dict, or a sequence whose elements are sequences
    (written positionally) or dicts (written against the union of all dict
    keys, in first-seen order). NOTE: values are not quoted or escaped, so
    values containing commas will corrupt the row layout.

    :param filename: destination file name
    :param data: data to write
    :param header: whether to write the collected dict keys as a header row
    :param encoding: text encoding
    """
    if isinstance(data, dict):
        data = [data]
    if not isinstance(data, (tuple, list, set)):
        # Preserve original behavior for unsupported types: the file is still
        # created/truncated, but nothing is written.
        with open(filename, 'w', encoding=encoding):
            return
    # First pass: collect the union of all dict keys up front.
    # BUG FIX: the original grew the column list while emitting rows, so dict
    # rows seen before a wider dict were written with fewer columns, and
    # same-length dicts with different keys silently lost values.
    struct_ = []
    for one in data:
        if isinstance(one, dict):
            for key in one.keys():
                if key not in struct_:
                    struct_.append(key)
    # Second pass: render every row against the complete column set.
    rows = []
    for one in data:
        if isinstance(one, (list, tuple, set)):
            rows.append([to_str(e) for e in one])
        elif isinstance(one, dict):
            rows.append([to_str(one.get(key, '')) for key in struct_])
    with open(filename, 'w', encoding=encoding) as f:
        if header:
            f.write(','.join(struct_) + '\n')
        for row in rows:
            f.write(','.join(row) + '\n')


def _write_properties_data(filename: str, data, encoding='utf-8'):
    """
    Write a dict as a .properties file (``a.b=value`` lines).

    Nested dicts become dot-joined keys; sequences become indexed keys such
    as ``key[0]``. A list/tuple wrapping a single dict is unwrapped first.

    :param filename: destination file name
    :param data: dict (or sequence whose first element is a dict) to write
    :param encoding: text encoding
    :raises Exception: if data is not dict-shaped after unwrapping
    """
    def dfs_properties_item(stream, prefix, v):
        # Emit one value: recurse with an [i] suffix for sequences, descend
        # into dicts, otherwise write a "prefix=value" line.
        if isinstance(v, (tuple, list, set)):
            v = list(v)
            for i in range(len(v)):
                dfs_properties_item(stream, prefix + '[' + str(i) + ']', v[i])
        elif isinstance(v, dict):
            dfs_properties_keys(stream, prefix, v, list(v.keys()))
        else:
            stream.write(prefix + "=" + to_str(v) + '\n')

    def dfs_properties_keys(stream, prefix, d, keys):
        # cmp_prefix already ends with '.' whenever there is a parent prefix.
        cmp_prefix = prefix + "." if prefix else ""
        for key in keys:
            if isinstance(d[key], dict):
                # NOTE(review): the conditional looks redundant — cmp_prefix is
                # '' when prefix is empty, so both branches yield the same key.
                dfs_properties_keys(stream, cmp_prefix + key if prefix else key, d[key], list(d[key].keys()))
            elif isinstance(d[key], (tuple, list, set)):
                # Index each element of the sequence under key[i].
                for i in range(len(d[key])):
                    dfs_properties_item(stream, cmp_prefix + key + '[' + str(i) + ']', d[key][i])
            else:
                dfs_properties_item(stream, cmp_prefix + key, d[key])

    with open(filename, 'w', encoding=encoding) as f:
        # Unwrap a sequence whose first element is a dict.
        # NOTE(review): data[0] raises TypeError for a set — the isinstance
        # check includes set, but only list/tuple support indexing; confirm.
        if isinstance(data, (list, tuple, set)) and len(data) >= 1 and isinstance(data[0], dict):
            data = data[0]
        if isinstance(data, dict):
            dfs_properties_keys(f, '', data, list(data.keys()))
        else:
            raise Exception(f'不支持的数据类型: {type(data)}')


# Unit labels for successive division by 1024 (units beyond YB are informal).
__FILE_SIZE_UNITS = ['B', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB', 'BB', 'NB', 'DB']


def get_user_read_size(size, decimal=1000):
    """
    Format a byte count into a human-readable string, e.g. ``'1.5 KB'``.

    :param size: size in bytes
    :param decimal: number of decimal places passed to round(); the default
        of 1000 effectively keeps full float precision
    :return: formatted string such as '512 B' or '1.5 KB'
    """
    unit_id = 0
    # BUG FIX: clamp at the last unit so extremely large inputs no longer walk
    # past the end of __FILE_SIZE_UNITS and raise IndexError.
    while size >= 1024 and unit_id < len(__FILE_SIZE_UNITS) - 1:
        size /= 1024
        unit_id += 1
    return str(round(size, decimal)) + ' ' + __FILE_SIZE_UNITS[unit_id]


def _get_batch_file_size(files):
    """Return the total size in bytes of the given files."""
    return sum(os.path.getsize(path) for path in files)


def get_dir_or_file_size(dir_or_file_path, executor=None, batch=50):
    """
    Return the size in bytes of a file, or the total size of a directory tree.

    :param dir_or_file_path: file or directory path
    :param executor: optional concurrent.futures-style executor; when given,
        per-directory file sizes are summed in parallel batches
    :param batch: number of files per submitted batch
    :return: size in bytes
    """
    # Plain file: answer directly.
    if not os.path.isdir(dir_or_file_path):
        return os.path.getsize(dir_or_file_path)
    total = 0
    for root, _dirs, names in os.walk(dir_or_file_path):
        paths = [os.path.join(root, name) for name in names]
        if executor is None:
            total += sum(os.path.getsize(p) for p in paths)
        else:
            # Fan out fixed-size batches and sum the partial results.
            futures = [executor.submit(_get_batch_file_size, paths[i:i + batch])
                       for i in range(0, len(paths), batch)]
            total += sum(future.result() for future in futures)
    return total
