"""
Some general-purpose utilities.

"""

import msvcrt
import os
import pickle as pkl
import re
import shutil
from configparser import ConfigParser, MissingSectionHeaderError
from pathlib import Path
from collections import Counter
from typing import Any, Callable, Dict, List, Optional, Tuple, Union, Iterable
from itertools import chain
from functools import reduce
import chardet
from traceback import print_exc
from tempfile import NamedTemporaryFile
import json

import pandas as pd
from pandas import DataFrame
from PIL import Image
from termcolor import colored
from send2trash import send2trash
from pyclibrary import CParser


__all__ = []


def export(f):
    """Decorator: record *f*'s name in the module-level ``__all__`` and return *f* unchanged."""
    __all__.append(f.__name__)
    return f


@export
class WorkingDirScope:
    """Context manager that temporarily switches the process working directory.

    On entry the cwd becomes ``change_to``; on exit the original directory
    is restored, even when the body raises.
    """

    def __init__(self, change_to: Union[str, Path]):
        # directory to switch into, and the one to restore afterwards
        self.from_dir = Path.cwd()
        self.change_to = Path(change_to)

    def __enter__(self):
        os.chdir(self.change_to)

    def __exit__(self, *args):
        os.chdir(self.from_dir)


@export
class Ops(Exception):
    """General-purpose error raised by utilities in this module.

    Behaves exactly like ``Exception``; exists so callers can catch
    module-specific failures distinctly.
    """

    def __init__(self, *args):
        super().__init__(*args)


@export
def data_store_to(obj: Any, file_path: str):
    """Pickle *obj* to *file_path*; warn when the name lacks a .pkl suffix."""
    if not file_path.endswith(".pkl"):
        print("Data Store Warning: write a pickle file without proper extension name. -> " + file_path)
    with open(file_path, "wb") as sink:
        pkl.dump(obj, sink)


@export
def data_load_from(file_path: str) -> Any:
    """Load and return the pickled object stored at *file_path*."""
    with open(file_path, "rb") as source:
        obj = pkl.load(source)
    return obj


@export
# only attributes of type str will be contained in dict.
def object_to_dict(o: Any) -> Dict[str, str]:
    """Snapshot *o*'s instance attributes, keeping only str-valued ones."""
    # exact type check (not isinstance), matching the module convention
    return {k: v for k, v in o.__dict__.items() if type(v) in (str,)}


@export
def object_from_dict(o: Any, d: Dict[str, str]) -> Any:
    """Copy the entries of *d* onto *o* as instance attributes; return *o*.

    Writes straight into ``__dict__`` (bypassing descriptors), mirroring
    what ``object_to_dict`` reads.
    """
    o.__dict__.update(d)
    return o


def ini_parser() -> ConfigParser:
    """Build a ConfigParser that keeps key case and accepts value-less keys."""
    parser = ConfigParser(allow_no_value=True)
    parser.optionxform = str  # default optionxform lower-cases keys; keep them as-is
    return parser


@export
def data_store_to_ini(
    d: Dict[str, str],
    file_path: Union[Path, str],
    encoding: str = "GB18030",
    *,
    no_default_section_in_file=True,
):
    """Write the key/value pairs of *d* into an ini file.

    All pairs go into the parser's default section.  When
    *no_default_section_in_file* is true, the ``[DEFAULT]`` header is then
    stripped from the file, leaving bare ``key = value`` lines.
    """
    # inlined ini_parser(): case-preserving parser that allows value-less keys
    cp = ConfigParser(allow_no_value=True)
    cp.optionxform = str
    for key, value in d.items():
        cp.set(cp.default_section, key, value)
    with open(file_path, "w", encoding=encoding) as f:
        cp.write(f)

    if no_default_section_in_file:
        with open(file_path, "r", encoding=encoding) as f:
            contents = f.read()
        stripped = contents.replace(f"[{cp.default_section}]", "").strip()
        with open(file_path, "w", encoding=encoding) as f:
            f.write(stripped)


def load_config_from_ini(file_path: Union[Path, str], encoding: str = "GB18030") -> ConfigParser:
    """Parse an ini file, tolerating files that start without a section header.

    configparser requires every key to live under a section; when the file
    begins with free-floating key/value pairs, a default-section header is
    prepended and parsing retried.
    """
    # inlined ini_parser(): case-preserving parser that allows value-less keys
    cp = ConfigParser(allow_no_value=True)
    cp.optionxform = str
    with open(file_path, "r", encoding=encoding) as f:
        text = f.read()
    try:
        cp.read_string(text)
    except MissingSectionHeaderError:
        cp.read_string(f"[{cp.default_section}]\n" + text)
    return cp


# only items in default section of a ini file will be read
@export
def data_load_from_ini(file_path: Union[Path, str], encoding: str = "GB18030") -> Dict[str, str]:
    """Read an ini file and return the default-section items as a dict.

    Only default-section (section-less) keys are returned.
    """
    parser = load_config_from_ini(file_path, encoding)
    return dict(parser.items(parser.default_section))


@export
def file_name_regulate(file_name: str, replace_with: str = "_") -> str:
    """
    将文件名中的不合法字符替换为下划线，或用replace_with 指定的字符
    """
    r = r"[\/\\\:\*\?\"\<\>\|\t]"
    return re.sub(r, replace_with, file_name).strip()


@export
def file_name_trim(file_name: str) -> str:
    """Tidy whitespace in a file name.

    - strip leading/trailing whitespace
    - collapse runs of whitespace into one space
    - remove spaces hugging the inside of parentheses
    - drop empty "()" pairs

    Examples:
        AAA    BBB => AAA BBB
        (  AAA )   => (AAA)
        AAA( ) BBB => AAA BBB
    """
    file_name = file_name.strip()
    file_name = re.sub(r"\s\s+", " ", file_name)
    file_name = re.sub(r"\(\s+", "(", file_name)
    # BUG FIX: the original replaced " )" with "(", producing unbalanced
    # parentheses; a closing paren is what the docstring examples intend.
    file_name = re.sub(r"\s+\)", ")", file_name)
    file_name = re.sub(r"\(\)", "", file_name)
    return file_name


@export
def load_csv(f: str) -> DataFrame:
    """Read a GB18030-encoded CSV (first column as the index) into a DataFrame."""
    return pd.read_csv(f, index_col=0, encoding="gb18030")


@export
def store_csv(f: str, data: DataFrame):
    """Write *data* to *f* as GB18030 CSV with CRLF line endings and 2-dp floats."""
    # BUG FIX: pandas 2.0 removed the ``line_terminator`` keyword; the
    # current spelling is ``lineterminator``.
    data.to_csv(f, lineterminator="\r\n", encoding="GB18030", float_format="%.2f")


@export
def avg(*val: Any) -> Any:
    """Arithmetic mean of the positional arguments."""
    total = sum(val)
    return total / len(val)


# Readability aliases for duration values used by from_min_sec/to_min_sec.
Second = int
Minute_Second = Tuple[int, int]  # (minutes, seconds)


@export
def from_min_sec(*val: Minute_Second) -> List[Second]:
    """Convert (minutes, seconds) tuples into totals of seconds."""
    return [ms[0] * 60 + ms[1] for ms in val]


@export
def to_min_sec(*val: Second) -> List[Minute_Second]:
    """Convert second counts into (minutes, seconds) tuples."""
    return [divmod(v, 60) for v in val]


@export
def getkey() -> Optional[int]:
    """Non-blocking keyboard poll (Windows only; relies on msvcrt).

    Returns the first byte of the pending keypress as an int, or None when
    no key is waiting.
    """
    if msvcrt.kbhit():
        return msvcrt.getch()[0]
    else:
        return None


@export
def pix_size_filter(img_dir, dest=r"d:/Pictures/Illustration/paper_xx", *, move_file=False):
    """Copy (or move) images that are at least 1920x1080 into *dest*.

    @param img_dir    directory to scan (str or Path; not recursive)
    @param dest       destination directory
    @param move_file  move instead of copy when True
    """
    # BUG FIX: the original guarded with ``if img_dir is not Path:`` — an
    # identity comparison against the class object, which is True for any
    # normal argument — and nested the whole loop inside that branch.
    # Coerce to Path unconditionally instead.
    img_dir = Path(img_dir)
    count = 0
    for f in img_dir.iterdir():
        if f.is_dir():
            continue
        with Image.open(str(f)) as img:
            w, h = img.size
        if w >= 1920 and h >= 1080:
            count += 1
            print(f"{f} - ({w} x {h})")
            if move_file:
                shutil.move(f, dest)
            else:
                shutil.copy(f, dest)
    print(f"-> all count: {count}.")


@export
class FileBatchNameKid:
    """Helper for previewing and applying batch file renames.

    Typical flow: construct with a directory and a glob filter, call
    ``try_name`` to preview the mapping, then ``proceed`` to apply it
    (or ``regret`` to attempt a rollback).
    """

    def __init__(self, parent_dir: Union[str, Path], filter: str) -> None:
        self.parent_dir: Path = Path(parent_dir)
        # planned renames: current resolved path -> new resolved path
        self.what_will_be: Dict[Path, Path] = {}
        self.filter = filter

    def set_filter(self, filter: str):
        """Replace the default glob filter."""
        self.filter = filter

    def try_name(self, new_name: Callable[[Path], str], filter: Optional[str] = None) -> "FileBatchNameKid":
        """Preview renames: map every matching file to ``new_name(file)``.

        Nothing is renamed yet; the plan is printed and stored.
        """
        self.what_will_be.clear()
        pattern = self.filter if filter is None else filter
        total = 0
        for source in self.parent_dir.glob(pattern):
            if not source.is_file():
                continue
            target = self.parent_dir / new_name(source)
            self.what_will_be[source.resolve()] = target.resolve()
            print(f"--> rename: [{source.name}] ==> [{target.name}].")
            total += 1
        print(f"-> all count: {total}.")
        print("-> call 'proceed()' to rename actually.")
        return self

    def proceed(self):
        """Apply the planned renames."""
        for old, new in self.what_will_be.items():
            old.rename(new)
        print("-> done.")

    def regret(self):
        """Attempt to restore names to the pre-``proceed`` state.  Risky."""
        for old, new in self.what_will_be.items():
            new.rename(old)
        print("-> done.")


@export
def bungumi_download_reorg(parent_dir: str, prefix: str) -> FileBatchNameKid:
    """Build a rename plan for downloaded episodes whose names start with *prefix*.

    Names containing '第N集' are mapped to '第NNN话<ext>' (episode number
    zero-padded to three digits); others keep their middle segment.
    """
    pattern = r"第([\d\.]+)集"

    def episode_name(f: Path):
        # the middle " - "-separated segment carries the episode title
        middle = f.name.split(" - ")[1]
        ext = f.name[-4:]  # assumes a dot plus 3-letter extension
        m = re.search(pattern, middle)
        if m:
            return "第{0:0>3}话{1}".format(m.group(1), ext)
        return middle  # type: ignore

    kid = FileBatchNameKid(parent_dir, f"{prefix}*")
    return kid.try_name(episode_name)


@export
def bungumi_download_reorg_prompt(parent_dir: str, prefix: str):
    """Plan the episode rename, then apply it only when the user enters nothing."""
    kid = bungumi_download_reorg(parent_dir, prefix)
    answer = input("input nothing to proceed, cancel otherwise.\n=> ")
    if not answer:
        kid.proceed()


@export
def age_file_rename(parent_dir: str) -> None:
    """Interactive episode rename for 'agefans' downloads.

    Return annotation fixed: bungumi_download_reorg_prompt returns None,
    not a FileBatchNameKid.
    """
    return bungumi_download_reorg_prompt(parent_dir, "agefans")


@export
def yhdm_file_rename(parent_dir: str) -> None:
    """Interactive episode rename for 'yhdm' downloads.

    Return annotation fixed: bungumi_download_reorg_prompt returns None,
    not a FileBatchNameKid.
    """
    return bungumi_download_reorg_prompt(parent_dir, "yhdm")


@export
def color_function_gen(color_name: str) -> Callable:
    """Build a convenience wrapper around termcolor.colored for *color_name*.

    The returned function takes the text plus keyword toggles for the
    common termcolor attributes and an optional highlight (background)
    color.
    """

    def color_function(
        text: str,
        *,
        highlight: Optional[str] = None,
        bold=False,
        blink=False,
        underline=False,
        reverse=False,
        dark=False,
    ) -> str:
        toggles = (
            ("bold", bold),
            ("blink", blink),
            ("underline", underline),
            ("reverse", reverse),
            ("dark", dark),
        )
        attr = [name for name, enabled in toggles if enabled]
        if highlight:
            return colored(text, color_name, "on_" + highlight, attrs=attr)
        return colored(text, color_name, attrs=attr)

    return color_function


# Ready-made color helpers built from color_function_gen; used throughout
# this module for status messages.
red = color_function_gen("red")
yellow = color_function_gen("yellow")
cyan = color_function_gen("cyan")
green = color_function_gen("green")
white = color_function_gen("white")


@export
def merge_bili_mp4(parent_dir: Union[str, Path], *, remove_when_succeed=False):
    """Merge paired bilibili download fragments into single mp4 files.

    Scans *parent_dir* for files named ``bili...`` ending in mp4/m4s that
    come in pairs -- ``X.ext`` plus ``X (1).ext`` -- and concatenates each
    pair with ffmpeg (stream copy, no re-encode).

    @param parent_dir           directory containing the fragments
    @param remove_when_succeed  delete the source pair after a successful merge
    """
    count = 0
    error_list = []
    parent_dir = Path(parent_dir)
    with WorkingDirScope(parent_dir):
        l = filter(
            lambda f: f.startswith("bili") and (f.endswith("mp4") or f.endswith("m4s")),
            os.listdir(),
        )
        # strip the " (N)" copy suffix so both halves of a pair collapse to
        # the same key; a key counted exactly twice is a mergeable pair
        p = r"( \(\d\))"
        lm = map(lambda f: re.sub(p, "", f), l)
        c = Counter(lm)
        for k in c:
            print(f"-> working on: {k}.")
            if c[k] != 2:
                print(red("!> file components count is not 2, skip."))
                continue
            # drop the first 15 chars (site/id prefix) and the 4-char
            # extension to form the output name -- assumes the fixed
            # bilibili file-name layout
            out_name = k[15:-4]
            k_name = k[:-4]
            k_ext = k[-4:]
            file_another = f"{k_name} (1){k_ext}"
            error_level = os.system(f'ffmpeg -hide_banner -i "{k}" -i "{file_another}" -codec copy "{out_name}.mp4"')
            print(f"-> ffmpeg done with error level [ {error_level} ].")
            if error_level != 0:
                error_list.append((error_level, k_name))
            elif remove_when_succeed:
                os.remove(k)
                os.remove(file_another)
                print("-> source files have been removed.")
            count += 1
    print(f"-> all count: {count}")
    if len(error_list) > 0:
        print(f"!> [ {len(error_list)} ] file have not been processed corectly.")
        for i in error_list:
            e, k = i
            print(f"!> error [ {e} ], file: {k}")


@export
def merge_mp4(
    parent_dir: Union[str, Path],
    prefix: str,
    *,
    remove_when_succeed=False,
    title_list: Optional[List[str]] = None,
):
    """Merge paired '<prefix>(N).mp4' fragments into numbered/titled mp4 files.

    @param parent_dir          directory containing the fragments
    @param prefix              common file-name prefix of the fragments
    @param remove_when_succeed send source pairs to the recycle bin on success
    @param title_list          optional output names indexed by pair order;
                               defaults to zero-padded indices ("000", "001", ...)
    """
    with WorkingDirScope(parent_dir):
        # a bare "<prefix>.mp4" is the zeroth fragment; normalize its name
        if (file0 := Path(prefix + ".mp4")).exists():
            file0.rename(prefix + "(0).mp4")

        file_dict = {}
        name_pattern = f"{re.escape(prefix)}\\((\\d+)\\)\\.mp4"
        p = Path.cwd()
        for f in p.iterdir():
            name = f.name
            m = re.match(name_pattern, name)
            if not m or len(m.groups()) != 1:
                continue
            file_num = int(m.groups()[0])
            if file_num in file_dict:
                print(red("-!> Duplicate file num.", reverse=True))
                print(red(file_dict[file_num].name))
                print(f.name)
                return
            file_dict[file_num] = f

        num_list = sorted(file_dict.keys())

        # Fragments come in pairs.  Numbering may have gaps, but the two
        # files of one video are always adjacent in download order, so take
        # the sorted numbers two at a time.

        mega_size = lambda f: round(f.stat().st_size / 1e6, 2)
        out_file_index = 0
        error_list = []
        for i in range(0, len(num_list), 2):
            f0 = file_dict[num_list[i]]
            f1 = file_dict[num_list[i + 1]]
            print(f"--> Pick file 0: {f0.name}")
            print(f"--> File 0 size: {mega_size(f0)} MB")
            print(f"--> Pick file 1: {f1.name}")
            print(f"--> File 1 size: {mega_size(f1)} MB")

            if title_list is not None:
                out_name = title_list[out_file_index]
            else:
                # BUG FIX: the original used "{:<03}" (left-align with fill
                # char '0'), turning index 1 into "100"; "{:03}" produces
                # the intended zero-padded "001".
                out_name = "{:03}".format(out_file_index)
            error_level = os.system(f'ffmpeg -hide_banner -i "{f0.name}" -i "{f1.name}" -codec copy "{out_name}.mp4"')
            print(f"-> ffmpeg done with error level [ {error_level} ].")
            out_file_index += 1

            if error_level != 0:
                error_list.append((f0, f1))
            elif remove_when_succeed:
                send2trash([f0, f1])
                print("-> source files have been removed.")

        print(f"-> all count: {out_file_index}")
        if len(error_list) > 0:
            print(f"!> [ {len(error_list)} ] pairs of files have not been processed correctly.")
            for m, n in error_list:
                print(f"!> f0:[{m}], f1: {n}.")


@export
def remove_duplicated_bpg(folder_path: Path):
    """Delete .bpg files that still have a sibling source image.

    For every ``X.bpg``, if any of ``X.webp/.jpg/.png/.jpeg`` exists in the
    same folder, the .bpg is removed (the source image is kept).
    """
    check_list = (".webp", ".jpg", ".png", ".jpeg")
    for f in folder_path.glob("*.bpg"):
        stem = f.name.removesuffix(".bpg")
        for suffix in check_list:
            if (folder_path / (stem + suffix)).exists():
                # message typo fixed: "remvoe" -> "remove"
                print(red(f"-> remove {f.name}"))
                os.remove(f)
                break


@export
def remove_bpg_converted(folder_path: Path, *, recursive=False):
    """Delete jpg/png/jpeg files that already have a .bpg conversion.

    A source named ``X (1).jpg`` is also checked against ``X.bpg`` by
    stripping the `` (1)`` copy suffix.

    @param recursive  also scan sub-folders
    """
    globber = folder_path.rglob if recursive else folder_path.glob
    f_list = chain(globber("*.jpg"), globber("*.png"), globber("*.jpeg"))

    for f in f_list:
        name = f.name
        base_name = name[: name.rindex(".")]
        while True:
            bpg_file_path = f.parent / (base_name + ".bpg")
            if bpg_file_path.exists():
                print(red(f"-> remove {f.name}"))
                os.remove(f)
            elif base_name.endswith(" (1)"):
                # BUG FIX: the original tested endswith("(1)") but stripped
                # " (1)" (with a leading space); a name ending in "(1)"
                # without the space never shrank, looping forever.
                base_name = base_name.removesuffix(" (1)")
                continue
            break


@export
def text_file_encoding_convert(
    f: Path, target_encoding: str, *, dry_run=False, target_file: Optional[Path] = None, remove_cr: bool = False
) -> Tuple[bool, str, float]:
    """Convert a single text file to the target encoding.

    @param  f                   file path
    @param  target_encoding     target encoding, e.g. utf-8
    @param  target_file         save-as path; when None the result overwrites
                                the source file, otherwise it is written here
    @param  dry_run             when True the source file is not modified
    @param  remove_cr           when True strip CR characters (CRLF -> LF)

    @return a 3-tuple: (success, detected source encoding, detection confidence)
    """
    target_encoding = target_encoding.lower()  # Python's canonical encoding names are lowercase
    raw = f.read_bytes()
    result = chardet.detect(raw)

    if result["encoding"] is None:
        raise Ops(f"{f}")

    encoding = result["encoding"].lower()  # encoding name guessed by chardet
    confidence = result["confidence"]  # detection confidence

    if encoding == "gb2312" and confidence < 1.0:  # gb18030 is a superset of gb2312; chardet may report
        # gb18030 content as gb2312, which would then fail to decode
        encoding = "gb18030"
        sys_print(f"?> gb2312 may actually be gb18030")

    flag = True

    # single-pass for-loop used as a poor man's goto: every "break" jumps to
    # the shared return at the bottom instead of repeating return statements
    for i in (1,):
        if target_file is None and (encoding == target_encoding or encoding == "ascii" and target_encoding == "utf-8"):
            # No work needed when the encodings already match.  ASCII is a
            # subset of utf-8, so ascii -> utf-8 would be a no-op as well.
            print(f"-> [NO CONVERSION NEEDED] {f.name}: {encoding} ==> [ {target_encoding} ]")
            break

        try:
            text = raw.decode(encoding)
        except:
            print(red(f"!> Encoding err: {f.name}, detected: {encoding}, {confidence}."))
            flag = False
            break

        if target_file is not None:
            dest_f = target_file
        else:
            dest_f = f

        if dry_run:
            print(f"-> [ NO WET ] {dest_f.name}: {encoding} ==> [ {target_encoding} ]")
        else:
            # Encode to bytes first and write in binary mode.  Writing as
            # text would re-translate line endings: CR LF in the source
            # would become CR CR LF, i.e. a pile of extra blank lines.
            if remove_cr:
                out = text.replace("\r", "").encode(target_encoding)
            else:
                out = text.encode(target_encoding)
            dest_f.write_bytes(out)
            print(cyan(f"-> {dest_f.name}: {encoding} ==> [ {target_encoding} ]"))

    return (flag, encoding, confidence)


@export
def text_file_encoding_batch_convert(
    folder: Path,
    target_encoding: str,
    *,
    dry_run=True,
    recursive=False,
    ext=("c", "h", "txt", "hpp", "hxx", "cpp", "cxx", "csv", "asm"),
    skip_when_error=True,
):
    """Batch-convert the encoding of text files under a directory.

    @param folder             target directory
    @param target_encoding    target encoding
    @param dry_run            don't actually modify files (guards against slips)
    @param recursive          include files in all sub-directories
    @param ext                file-name extensions treated as text files
    @param skip_when_error    True (default): warn and skip a file that fails
                              to convert; False: abort the whole batch

    Note: the ``ext`` default was changed from a list to a tuple -- mutable
    default arguments are a classic Python pitfall, and the value is only
    iterated, so this is backward-compatible.
    """
    glob = folder.rglob if recursive else folder.glob
    flist = chain.from_iterable(glob(f"*.{e}") for e in ext)

    for f in flist:
        if not f.is_file():
            continue

        ok, encoding, confidence = text_file_encoding_convert(f, target_encoding, dry_run=dry_run)
        if not ok:
            if skip_when_error:
                print(yellow("!> SKIP."))
            else:
                print(red("!> ABORT."))
                return


@export
def create_temp_copy_from_text_file(
    src_path: Union[Path, str], dest_path: Union[Path, str], dest_encoding: str, *, remove_cr=True
):
    """Save a converted copy of *src_path* at *dest_path* in *dest_encoding*.

    Thin wrapper over text_file_encoding_convert; CRs are stripped by
    default so the copy uses LF line endings.
    """
    text_file_encoding_convert(Path(src_path), dest_encoding, target_file=Path(dest_path), remove_cr=remove_cr)


@export
def parse_c_header_symbol(file_path: Path) -> Dict:
    """Parse a C header with pyclibrary and return its symbol tables.

    The 'values' and 'variables' tables are dropped; the returned dict maps
    category name (types, functions, macros, ...) to {symbol: definition}.
    """
    parser = CParser([str(file_path.absolute())])
    # pop with a default so headers that define no values/variables
    # don't raise KeyError
    parser.defs.pop("values", None)
    parser.defs.pop("variables", None)
    return parser.defs


@export
def print_c_parsed_symbol(parsed_result):
    """Pretty-print parse_c_header_symbol output as a C block comment.

    Each non-empty category becomes a titled section listing its symbols,
    ten per line.
    """
    print("/*")
    for category, table in parsed_result.items():
        symbols = table.keys()
        if symbols is None or len(symbols) == 0:
            continue
        print()
        print(f"//  ======  {category.upper()}  ======")

        for idx, symbol in enumerate(symbols, 1):
            print(symbol, end="  ")
            if idx % 10 == 0:
                print("")
        print("\n")
    print("*/")


# extension 的形式是.jpg，要包含“点”
@export
def change_file_name_extension(file_name: str, extension: str) -> str:
    """Return *file_name* with its extension replaced by *extension*.

    @param extension  new extension including the leading dot, e.g. ".jpg"
    """
    dot = file_name.rfind(".")
    if dot == -1:
        # BUG FIX: rfind returns -1 when there is no dot; the original then
        # sliced [:-1] and silently chopped the last character.  Append the
        # new extension instead.
        return file_name + extension
    return file_name[:dot] + extension


@export
def add_header_to_file(file_path: Union[Path, str], header: bytes):
    """Prepend *header* bytes to the file at *file_path*, in place."""
    with open(file_path, "r+b") as f:
        body = f.read()
        f.seek(0)
        f.write(header + body)


@export
def run_and_catch(f: Callable, *, pause_after_catch=False) -> Any:
    """Call *f* and return its result; print a traceback on failure.

    @param pause_after_catch  run the shell 'pause' command after printing
                              the traceback (Windows-style interactive wait)
    @return f() on success, None when an exception was caught
    """
    try:
        return f()
    except Exception:
        # was a bare ``except:``, which also swallows SystemExit and
        # KeyboardInterrupt; catch only genuine errors
        print()
        print_exc()
        if pause_after_catch:
            os.system("pause")
    return None


@export
def restore_rpgmvp(f: Path):
    """Recover a PNG from an RPG Maker MV ``.rpgmvp`` file.

    The first 16 bytes of the PNG are encrypted and a 16-byte rpgmvp header
    is prepended (32 bytes in total at the start of the file).  Recovery
    simply drops those 32 bytes and splices a standard PNG signature + IHDR
    prefix back on.  The result is written next to the source as
    ``<stem>.png``.
    """
    if f.suffix not in (".rpgmvp", ".png_"):
        # BUG FIX: the colored message was built but never printed
        print(red(f"!-> Invalid RPGMVP file: {str(f)}"))
        return
    png_header = bytes(b"\x89\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52")
    header_len = len(png_header)
    dat = f.read_bytes()
    dat = dat[header_len * 2 :]
    restore_dat = png_header + dat
    png_name = f.stem + ".png"
    new_file = f.parent / png_name
    new_file.write_bytes(restore_dat)


@export
def file_move_and_rename(src: Path, dest_dir: Path, new_name: Optional[str] = None):
    """Move *src* into *dest_dir*, optionally renaming it to *new_name*.

    @param new_name  target file name; defaults to the source's name
    """
    if new_name is None:
        new_name = src.name
    # BUG FIX: the original renamed within src.parent and ignored dest_dir
    # entirely, so the file was never moved.
    src.rename(dest_dir / new_name)


"""
def E_trans_to_C(string):
    E_pun = u',.!?[]()<>"\''
    C_pun = u'，。！？【】（）《》“‘'
    table= {ord(f):ord(t) for f,t in zip(E_pun,C_pun)}
    return string.translate(table)
————————————————
版权声明：本文为CSDN博主「SNII_629」的原创文章，遵循CC 4.0 BY-SA版权协议，转载请附上原文出处链接及本声明。
原文链接：https://blog.csdn.net/nanbei2463776506/article/details/82967140
"""


@export
def punctuation_ch_to_en(target: str) -> str:
    """Translate full-width Chinese punctuation in *target* to ASCII equivalents."""
    pairs = [
        ("，", ","),
        ("。", "."),
        ("？", "?"),
        ("！", "!"),
        ("【", "["),
        ("】", "]"),
        ("（", "("),
        ("）", ")"),
        ("《", "<"),
        ("》", ">"),
        ("”", '"'),
        ("‘", "'"),
        ("’", "'"),
        ("“", '"'),
        ("￥", "$"),
    ]
    table = str.maketrans(dict(pairs))
    return target.translate(table)


@export
def text_find_and_split(pattern: Union[str, re.Pattern], text: str) -> List[Tuple[str, Optional[re.Match]]]:
    r"""Split *text* around every match of *pattern*.

    Returns a list of (fragment, match) tuples: each fragment is the text
    preceding the corresponding match, with the matched piece itself cut
    out of the string.  The trailing fragment after the last match is
    paired with None.

    Example: "AAAA $01$ BBBB $02$ CCCC" with pattern \$\d+\$ yields

        [
            ("AAAA ", Match("$01$")),
            (" BBBB ", Match("$02$")),
            (" CCCC", None),
        ]

    Parameters
    ----------
    pattern : Union[str, re.Pattern]
        regex as a string or a pre-compiled Pattern object.
    text : str
        the string to split.

    Returns
    -------
    List[Tuple[str, Optional[re.Match]]]
        (preceding fragment, Match) pairs; the final pair carries None.
    """
    compiled = pattern if isinstance(pattern, re.Pattern) else re.compile(pattern)
    pieces: List[Tuple[str, Optional[re.Match]]] = []
    cursor = 0
    for match in compiled.finditer(text):
        begin, end = match.span()
        pieces.append((text[cursor:begin], match))
        cursor = end
    pieces.append((text[cursor:], None))
    return pieces


@export
def search_in_frame(df: DataFrame, col: Any, val: Any) -> List[Any]:
    """Return the index labels of rows where column *col* equals *val*.

    Parameters
    ----------
    df : DataFrame
        the table to search.
    col : Any
        column name.
    val : Any
        value to look for.

    Returns
    -------
    List[Any]
        the matching index labels.
    """
    mask = df[col] == val
    return list(df.index[mask])


@export
def sys_text(s: str) -> str:
    """Colorize a status message by its prefix: '!>' red, '?>' yellow, '#>' cyan."""
    prefix_paint = (("!>", red), ("?>", yellow), ("#>", cyan))
    for prefix, paint in prefix_paint:
        if s.startswith(prefix):
            return paint(s, bold=True)
    return s


@export
def sys_print(s: str) -> None:
    """Print *s* after colorizing it with sys_text."""
    print(sys_text(s))


@export
def str_contains(s: str, sub_str: Union[List[str], str]) -> bool:
    """Test whether *s* contains *sub_str* (or any element of it).

    @param sub_str  a single substring, or a list of candidate substrings
    @return True when sub_str (or at least one candidate) occurs in s;
            an empty list yields False
    """
    if isinstance(sub_str, str):
        return sub_str in s
    # any() short-circuits and returns False for an empty iterable,
    # replacing the original non-short-circuiting reduce(or)-based scan
    return any(x in s for x in sub_str)


@export
def multi_glob(parent: Path, pattern_list: List[str], *, recursive=False) -> Iterable[Path]:
    """Chain the results of globbing *parent* with every pattern in order.

    @param recursive  use rglob (recursive) instead of glob
    """
    globber = parent.rglob if recursive else parent.glob
    return chain.from_iterable(globber(pattern) for pattern in pattern_list)


@export
def clang_format_cxx(folder_path: Path, *, recursive=False, dry_run=True):
    """Run clang-format in-place over the C/C++ sources under *folder_path*.

    Collects the source paths into a temporary list file and hands it to
    ``clang-format -i -files=...``.

    @param recursive  include sub-directories
    @param dry_run    only print the file list; don't invoke clang-format
    """
    ext_list = ["*.cpp", "*.cxx", "*.c", "*.h", "*.hpp", "*.hxx"]
    f_list = multi_glob(folder_path, ext_list, recursive=recursive)
    path_list = map(lambda f: f.as_posix(), f_list)

    with WorkingDirScope(folder_path):
        # delete=False so the closed file can be handed to clang-format;
        # it is removed manually below
        temp_file = NamedTemporaryFile(prefix="source_list_", suffix=".txt", dir=folder_path, delete=False)
        tmp_name = temp_file.name
        text = "\n".join(path_list)
        temp_file.write(text.encode("utf-8"))
        temp_file.close()

        print("== SOURCE FILE LIST ==")
        print(text)

        if dry_run:
            print("Dry Run Ended.")
        else:
            os.system(f'clang-format -i -files="{tmp_name}"')
            print("Done.")

        os.remove(tmp_name)


@export
def format_json(file_path: Path):
    """Re-write a JSON file pretty-printed with 4-space indentation.

    Reads and writes UTF-8 explicitly (the JSON interchange encoding), so
    the result no longer depends on the platform's locale default.
    """
    s = file_path.read_text(encoding="utf-8")
    j = json.loads(s)
    ss = json.dumps(j, indent=4)
    file_path.write_text(ss, encoding="utf-8")


@export
def markdown_image_regex_pattern() -> str:
    """Regex matching a markdown image ``![alt](url)``; capture groups: (alt, url)."""
    return r"\!\[(.+?)\]\((.+?)\)"


@export
def markdown_find_all_pic(text: str) -> List[Tuple[str, str]]:
    """Return (alt, url) pairs for every markdown image ``![alt](url)`` in *text*."""
    # markdown_image_regex_pattern() inlined
    return re.findall(r"\!\[(.+?)\]\((.+?)\)", text, flags=re.MULTILINE)


@export
def parse_int(num_str: str, default_base: int) -> int:
    """Parse one number string; honors 0x/0b/0o prefixes (lowercase only),
    otherwise uses *default_base*."""
    prefix_bases = (("0x", 16), ("0b", 2), ("0o", 8))
    for prefix, base in prefix_bases:
        if num_str.startswith(prefix):
            return int(num_str, base)
    return int(num_str, default_base)


@export
def parse_range_list(s: str, num_base: int = 10, *, allow_negative=False) -> list[int]:
    """
    Parse a string of integers and integer ranges into a sorted list of
    unique ints.

    Parameters:
    s - input string, e.g. "1-5,8;9，0xA:0xF"
    num_base - default base for numbers without a prefix (10 by default)

    Features:
    1. separators: , ; and the full-width Chinese comma
    2. range markers: - and :
    3. base prefixes (0x/0X hex, 0b/0B binary, 0o/0O octal)

    Raises ValueError when a negative number appears and *allow_negative*
    is False.
    """
    numbers = []
    s = s.lower()
    parts = re.split("[,;，]", s)
    for part in parts:
        part = part.strip()
        if len(part) == 0:
            # skip blank fragments between separators
            continue
        part_parts = re.split(r"[:\-]", part)
        if len(part_parts) > 1:
            # a range: expand it end-inclusive
            start = parse_int(part_parts[0], num_base)
            end = parse_int(part_parts[1], num_base)
            # ensure start <= end
            lower, upper = min(start, end), max(start, end)
            if not allow_negative and lower < 0:
                raise ValueError("Negative Number not Allowed")
            numbers.extend(range(lower, upper + 1))
        else:
            # a single number
            n = parse_int(part, num_base)
            if not allow_negative and n < 0:
                raise ValueError("Negative Number not Allowed")
            numbers.append(n)
    # de-duplicate and sort
    l = list(set(numbers))
    l.sort()
    return l


@export
def gbstr(s: str) -> list[int]:
    """Print and return the GB2312 code points of *s* as a C array literal.

    Each character is encoded to its two GB2312 bytes, combined into one
    16-bit value (high byte first).
    """
    pairs = (ch.encode("gb2312") for ch in s)
    codes = [(bb[0] << 8) + bb[1] for bb in pairs]
    print("{ " + ", ".join(hex(c) for c in codes) + " }")
    return codes


@export
def gbchar(s: str):
    """Print a 'constexpr uint16_t' C++ definition for each character of *s*."""
    for ch, code in zip(s, gbstr(s)):
        print(f"constexpr uint16_t {ch} = {hex(code)};")
        print()
