"""
* Author       : Isidore
* Date         : 2022-08-29 22:23:41
* LastEditors  : Isidore
* LastEditTime : 2022-09-02 16:48:51
* Description  : file content
* FilePath     : /Workspace/siyuan_md.py
"""

from retry import retry
import re
import time
import urllib
import shutil
import requests
from tqdm.auto import tqdm
import os
import warnings
import pandas as pd
from operator import methodcaller
from concurrent.futures import ThreadPoolExecutor, as_completed


def get_file_df(note_dir):
    """Build a DataFrame of all markdown notes under *note_dir*.

    Folders whose path ends with "assets" or contains "logseq" are
    skipped. Columns: "path" (full path), "name" (file name),
    "dir" (containing directory). Only ".md" files are kept.
    """
    records = []
    for root, _folders, files in os.walk(note_dir):
        if root.endswith("assets") or "logseq" in root:
            continue
        for fname in files:
            records.append((os.path.join(root, fname), fname))

    file_df = pd.DataFrame(records, columns=["path", "name"])
    file_df["dir"] = file_df["path"].apply(os.path.dirname)
    return file_df[file_df["name"].str.endswith(".md")]


def page_remove_bak(note_dir):
    """Delete stray markdown files left inside any "assets" folder."""
    for root, _folders, files in os.walk(note_dir):
        if not root.endswith("assets"):
            continue
        for name in files:
            if name.endswith(".md"):
                os.remove(os.path.join(root, name))


def page_remove_empty(file_df):
    """Delete every note that contains nothing but whitespace."""
    for path in file_df["path"].tolist():
        with open(path, encoding="utf8") as f:
            content = f.read()
        if not content.strip():
            os.remove(path)


def page_structure_flatten(note_dir):
    """Move every note into note_dir/pages and remove the emptied folders.

    Image links of the form "![alt](assets/...)" (with any number of
    "../" prefixes) are normalised to "![](../assets/...)" before the
    move, since all pages end up one level below note_dir. Top-level
    folders in the reserved list (logseq, pages, journals, .obsidian,
    assets) are left untouched.

    Fix: the previous bare ``except:`` swallowed every exception
    (including KeyboardInterrupt); only the move-related errors
    (shutil.Error / OSError, e.g. destination already exists) are now
    caught and reported.
    """
    reserved_dir_list = ["logseq", "pages", "journals", ".obsidian", "assets"]
    os.makedirs(os.path.join(note_dir, "pages"), exist_ok=True)
    for root, folders, files in os.walk(note_dir):
        # Skip only the reserved top-level folders themselves.
        if any(root == os.path.join(note_dir, d) for d in reserved_dir_list):
            continue
        for file in files:
            file_path = os.path.join(root, file)
            with open(file_path, encoding="utf8") as f:
                note = f.read()
            with open(file_path, "w", encoding="utf8") as f:
                f.write(re.sub(r"!\[[^()]*?\]\((?:\.\.\/)*(assets\/.+?)\)", r"![](../\1)", note))
            try:
                shutil.move(file_path, os.path.join(note_dir, "pages"))
            except (shutil.Error, OSError):
                # Report the collision/IO problem and keep the file in place.
                print(root, file)
    # Second pass: drop the now-empty non-reserved top-level folders.
    for folder in os.listdir(note_dir):
        if folder not in reserved_dir_list:
            shutil.rmtree(os.path.join(note_dir, folder))


def page_remove_assets_folders(note_dir):
    """Remove empty per-note assets folders; the top-level one is kept.

    Non-empty folders are only reported, never deleted.
    """
    top_assets = os.path.join(note_dir, "assets")
    for root, _folders, files in os.walk(note_dir):
        if not root.endswith("assets") or root == top_assets:
            continue
        if files:
            print("None Empty assets Folder:", root)
        else:
            os.rmdir(root)


def replace_double_link(file_df):
    """Turn SiYuan ref links [[[name]]](siyuan:...) into plain [[name]]."""
    for path in file_df["path"].tolist():
        with open(path, encoding="utf8") as f:
            text = f.read()
        if "[[[" not in text:
            continue
        with open(path, "w", encoding="utf8") as f:
            f.write(re.sub("\[\[\[(.*)\]\]\]\(siyuan:.*?\)", r"[[\1]]", text))


def page_remove_note_title(file_df):
    """Strip a redundant first line that merely repeats the file name.

    A first line equal to "# <name>", "## <name>" or a blank line is
    dropped; if it was the file's only line the file is deleted.

    Fix: an empty file used to raise IndexError on ``note[0]``; empty
    files are now skipped.
    """
    for row in file_df.iterrows():
        file_path, file_name = row[1]["path"], row[1]["name"]
        with open(file_path, encoding="utf8") as f:
            note = f.readlines()
        if not note:
            continue  # empty file: nothing to strip
        title = file_name[:-3]  # file name without the ".md" suffix
        if note[0] not in (f"# {title}\n", f"## {title}\n", "\n"):
            continue
        if len(note) == 1:
            os.remove(file_path)
        else:
            with open(file_path, "w", encoding="utf8") as f:
                f.writelines(note[1:])


def replace_marks(file_df):
    """Replace full-width ideographic spaces (U+3000) with ASCII spaces."""
    for row in file_df.iterrows():
        path = row[1]["path"]
        with open(path, encoding="utf8") as f:
            text = f.read()
        if "　" not in text:
            continue
        with open(path, "w", encoding="utf8") as f:
            f.write(text.replace("　", " "))


def replace_numbered_list(file_df):
    """Convert ordered-list markers ("1. ") into bullet markers ("- ").

    Fix: the old pattern ``"\\d\\. "`` matched anywhere in the text (so
    prose like "version 3. ok" was mangled) and only consumed a single
    digit (so "10. " became "1- "). The marker is now anchored at the
    start of a (possibly indented) line and accepts multi-digit numbers;
    indentation is preserved for nested lists.
    """
    pattern = re.compile(r"^(\s*)\d+\. ", flags=re.MULTILINE)
    for row in file_df.iterrows():
        file_path = row[1]["path"]
        with open(file_path, encoding="utf8") as f:
            note = f.read()
        fixed = pattern.sub(r"\1- ", note)
        if fixed != note:  # rewrite only when something actually changed
            with open(file_path, "w", encoding="utf8") as f:
                f.write(fixed)


def remove_multi_empty_lines(file_df):
    """Collapse runs of blank lines to a single one, and remove blank
    lines that directly precede or follow a ``` code fence."""
    collapse_rules = (
        (" *\n *\n( *\n)+", "\n\n"),
        (" *\t* *\n( *\t* *\n)+ *```", "\n```"),
        ("``` *\t* *\n( *\t* *\n)+", "```\n"),
    )
    for path in file_df["path"].tolist():
        with open(path, encoding="utf8") as f:
            text = f.read()
        for pattern, repl in collapse_rules:
            text = re.sub(pattern, repl, text)
        with open(path, "w", encoding="utf8") as f:
            f.write(text)


def move_local_image(file_df, note_dir):
    """Consolidate per-note assets folders into note_dir/assets and fix links.

    First pass: move every file from a nested "assets" folder into the
    top-level one, skipping names that already exist there. Second pass:
    rewrite each note's "![...](assets/...)" image links with the number
    of "../" prefixes matching the note's depth below note_dir.

    Fix: depth was computed by splitting the directory on "\\\\", which
    only works for Windows-style relative paths; it is now derived from
    os.path.relpath/os.sep, which gives the same result there and is
    correct on every platform.
    """
    top_assets = os.path.join(note_dir, "assets")
    for root, folders, files in os.walk(note_dir):
        # Re-list every iteration: files moved below change the contents.
        assets_file_list = os.listdir(top_assets)
        if root.endswith("assets") and root != top_assets:
            for file in files:
                if file not in assets_file_list:
                    shutil.move(os.path.join(root, file), top_assets)

    for row in file_df.iterrows():
        file_path, file_dir = row[1]["path"], row[1]["dir"]
        with open(file_path, encoding="utf8") as f:
            note = f.read()
        # Number of directory levels between the note and note_dir.
        rel = os.path.relpath(file_dir, note_dir)
        depth = 0 if rel == os.curdir else len(rel.split(os.sep))
        with open(file_path, "w", encoding="utf8") as f:
            note = re.sub("!\[.*?\]\(assets\/(.*)\)", f"![]({'../'*depth}assets/\\1)", note)
            f.write(note)


def compress_equation(equation):
    """Shorten an equation-image file name deterministically.

    Common LaTeX keywords are abbreviated to single letters; then the
    digits are folded: each digit is zeroed while a running sum is
    carried into the next non-digit character, and all zeros are
    finally removed.

    NOTE(review): the single-count ``str.replace(..., 1)`` acts on the
    *first* occurrence of a character, not on the current position, so
    repeated characters interact; kept as-is because existing asset
    names depend on this exact behaviour.
    """
    abbreviations = {
        "begin": "t",
        "end": "e",
        "left": "l",
        "right": "r",
        "array": "a",
        "times": "t",
        "bold": "b",
        "alpha": "a",
        "symbol": "s",
        "dots": "d",
        "beta": "b",
    }
    for keyword, short in abbreviations.items():
        equation = equation.replace(keyword, short)

    carry = 0
    # All replacements are 1-char-for-1-char, so the length is constant
    # and an index loop over the original length stays valid.
    for pos in range(len(equation)):
        current = equation[pos]
        if current.isdigit():
            carry += int(current)
            equation = equation.replace(current, "0", 1)
        elif carry > 0:
            equation = equation.replace(current, str(carry % 10), 1)
            carry = 0
    return equation.replace("0", "")


def move_online_image(note_dir, file_df):
    """Download every http(s) image referenced in the notes into
    note_dir/assets and rewrite the markdown links to the local copies.

    Remote basenames are sanitised (URL-unquoted, non-word characters
    stripped, an extension guessed from the host when missing) and
    de-duplicated against files already in assets; downloads run on a
    4-worker thread pool with 5 retries each.

    NOTE(review): all requests go through a proxy at 127.0.0.1:7890 with
    TLS verification disabled, and the depth computation splits on "\\"
    (Windows separators) — confirm both before running in another
    environment. The executor is never shut down, and ``_get`` is unused.
    """
    # Browser-like headers so image CDNs serve the file instead of a 403.
    headers = {
        "Connection": "keep-alive",
        "sec-ch-ua": '"Chromium";v="88", "Google Chrome";v="88", ";Not A Brand";v="99"',
        "sec-ch-ua-mobile": "?0",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.146 Safari/537.36",
        "Accept": "image/avif,image/webp,image/apng,image/svg+xml,image/*,*/*;q=0.8",
        "Sec-Fetch-Site": "same-site",
        "Sec-Fetch-Mode": "no-cors",
        "Sec-Fetch-Dest": "image",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
    }
    # Route every download through a local proxy on port 7890.
    proxies = {"http": "http://127.0.0.1:7890", "https": "http://127.0.0.1:7890"}

    # Silence urllib3's InsecureRequestWarning caused by verify=False.
    warnings.filterwarnings("ignore")

    @retry(tries=5, delay=0.5)
    def _get(url):
        # Plain fetch helper; not called below (kept for debugging, presumably).
        res = requests.get(url, headers=headers, timeout=3, proxies=proxies, verify=False)
        return res

    @retry(tries=5, delay=0.5)
    def _get__write_one(url, name):
        # Fetch one image and write it into the shared assets folder.
        res = requests.get(url, headers=headers, timeout=3, proxies=proxies, verify=False)
        time.sleep(0.02)
        with open(os.path.join(note_dir, "assets", name), "wb") as f:
            f.write(res.content)

    # Names already present locally; extended as new names are claimed.
    assets_list = os.listdir(os.path.join(note_dir, "assets"))
    # Original remote basename -> sanitised local name, shared across all
    # notes so a URL repeated in several files is downloaded only once.
    name_dict = {}
    executor = ThreadPoolExecutor(max_workers=4)

    for row in tqdm(file_df.iterrows(), total=file_df.shape[0]):
        flag_fix = False
        file_path, file_dir = row[1]["path"], row[1]["dir"]
        with open(file_path, encoding="utf8") as f:
            note = f.read()
        # Markdown image links whose target is an absolute http(s) URL.
        urls = re.findall("!\[[^()]*?\]\((https?:\/\/.+?\/.+?)\)", note)
        if len(urls) > 0:
            flag_fix = True
        if flag_fix:
            urls = set(urls)
            task_list = []

            for url in tqdm(urls, leave=False):
                ori_name = os.path.basename(url)
                if ori_name in name_dict:
                    continue  # already downloaded (or queued) for an earlier note
                name = urllib.parse.unquote(ori_name)
                name = re.sub("[^\w\d\.]", "", name)  # keep word chars and dots only
                if "equation" in ori_name:
                    # Equation-renderer URLs yield very long names; shrink them.
                    name = compress_equation(name)
                if not name.endswith(".jpg") and not name.endswith(".png") and not name.endswith(".svg") and not name.endswith(".gif"):
                    # No recognised extension: drop inner dots and guess one
                    # from the URL's host/content.
                    name = "".join(name.split("."))
                    if "zhihu.com" in url:
                        name += ".svg"
                    elif "gif.latex" in url:
                        name += ".gif"
                    else:
                        name += ".jpg"
                if name in assets_list:
                    # Collision with an existing file: append a suffix derived
                    # from the current time in milliseconds.
                    splitted_name_list = name.split(".")
                    name = "".join(splitted_name_list[:-1] + ["_", str(int(time.time() * 1000) % 100000)]) + "." + splitted_name_list[-1]
                    time.sleep(0.001)  # ensure the next timestamp suffix differs
                    print(name)
                name_dict[ori_name] = name
                assets_list.append(name)
                task = executor.submit(_get__write_one, url, name)
                task_list.append(task)
            # Block until this note's downloads have finished.
            for task in tqdm(as_completed(task_list), total=len(task_list)):
                pass

            # NOTE(review): assumes Windows "\\" separators in `dir` — confirm.
            depth = len(file_dir.split("\\")) - 1
            with open(file_path, "w", encoding="utf8") as f:
                # Point each remote link at the relative assets folder, then
                # swap the original basenames for the sanitised local names.
                note = re.sub("!\[[^\(\)]*?\]\(https?:\/\/[^\(\)]+\/([^\(\)]+?)\)", f"![]({'../'*depth}assets/\\1)", note)
                for ori_name, name in name_dict.items():
                    note = re.sub("(?<=\/)" + re.escape(ori_name) + "(?=\))", name, note)
                f.write(note)


def replace_image_anchor(file_df):
    """Rewrite Obsidian-style embeds ``![[target]]`` to ``![target]``.

    NOTE(review): the replacement has no "(url)" part, so the result is
    not a complete markdown image link — presumably repaired by a later
    pass; confirm before changing.
    """
    for row in file_df.iterrows():
        path = row[1]["path"]
        with open(path, encoding="utf8") as f:
            text = f.read()
        if "![[" not in text:
            continue
        with open(path, "w", encoding="utf8") as f:
            f.write(re.sub("!\[\[(.*?)\]\]", f"![\\1]", text))


def replace_nested_url(file_df):
    """Unwrap nested image links ``[![alt](assets/img)](url)`` into a
    plain local image ``![](assets/img)``, dropping the outer URL."""
    for row in file_df.iterrows():
        path = row[1]["path"]
        with open(path, encoding="utf8") as f:
            text = f.read()
        if "[![" not in text:
            continue
        with open(path, "w", encoding="utf8") as f:
            f.write(re.sub("\[!\[[^()]*?\]\(((?:\.\.\/)*assets\/.+?)\)\s?\]\(.+?\)", f"![](\\1)", text))


def remove_image_pre_blank(file_df):
    """Delete whitespace preceding a nested image link
    ``[![alt](assets/sub/img)](url)`` so the link hugs the preceding text.

    Fix: the old trigger tested for the literal text ``" !\\["`` (a stray
    backslash from a non-raw string), so the rewrite almost never ran.
    The substitution result itself now decides whether to write.
    """
    pattern = re.compile(r"\s+(\[!\[[^()]*?\]\((?:\.\.\/)*assets\/.+\/.+?\)\]\(.+?\))")
    for row in file_df.iterrows():
        file_path = row[1]["path"]
        with open(file_path, encoding="utf8") as f:
            note = f.read()
        fixed = pattern.sub(r"\1", note)
        if fixed != note:
            with open(file_path, "w", encoding="utf8") as f:
                f.write(fixed)


# move_pages("笔记二泉")


def add_property_title(note_dir):
    """Append a ``title::`` property to each Logseq page whose property
    block (the text before the first blank line) lacks one.

    A page qualifies only if its first line already looks like a
    property ("key:: value") and "title" appears nowhere in the block.
    """
    pages_dir = os.path.join(note_dir, "pages")
    for file in os.listdir(pages_dir):
        page_path = os.path.join(pages_dir, file)
        with open(page_path, encoding="utf8") as f:
            note = f.read()
        if "\n\n" not in note:
            continue
        props, body = note.split("\n\n", 1)
        first_line_is_property = ":: " in props.split("\n")[0]
        if "title" in props or not first_line_is_property:
            continue
        props += f"\ntitle:: {file[:-3]}"
        with open(page_path, "w", encoding="utf8") as f:
            f.write("\n\n".join([props, body]))


# add_property_title("笔记二泉")


def combine_files(target_dir="凝练的思考/一点日记/2021成功司机", out_file="凝练的思考/一点日记/2021成功司机.md"):
    """Merge every note under *target_dir* into the single file *out_file*.

    A note's first line, if it contains a heading marker, is demoted to
    plain text by stripping all '#' and spaces from it; notes are joined
    with "---" horizontal rules.

    Fix: an empty file used to raise IndexError on ``note[0]``; empty
    notes are now merged as empty sections instead of crashing.
    """
    note_list = []
    for file in os.listdir(target_dir):
        with open(os.path.join(target_dir, file), encoding="utf8") as f:
            note = f.readlines()
        # Demote a leading heading on the first line only.
        if note and "#" in note[0]:
            note[0] = note[0].replace(" ", "").replace("#", "")
        note_list.append("\n".join(note))
    note_collection = "\n---\n".join(note_list)
    # readlines keeps "\n" and the join adds another; collapse the doubles.
    note_collection = note_collection.replace("\n\n", "\n")

    with open(out_file, "w", encoding="utf8") as f:
        f.write(note_collection)


def replace_double_eq(file_df):
    """Pad ``==`` operators that have no surrounding spaces with a space
    on each side (e.g. "a==b" -> "a == b")."""
    for row in file_df.iterrows():
        path = row[1]["path"]
        with open(path, encoding="utf8") as f:
            text = f.read()
        if "==" not in text:
            continue
        with open(path, "w", encoding="utf8") as f:
            f.write(re.sub("(?<! )==(?! )", " == ", text))


def change_date(file_df):
    """Convert ``[[M-D-YYYY]]`` journal links to zero-padded
    ``[[YYYY-MM-DD]]`` form."""
    for path in file_df["path"].tolist():
        with open(path, encoding="utf8") as f:
            text = f.read()
        matches = re.findall("\[\[ *\d{1,2}-\d{1,2}-\d{4}\]\]", text)
        if not matches:
            continue
        for raw in matches:
            month, day, year = re.findall("(\d{1,2})-(\d{1,2})-(\d{4})", raw)[0]
            text = text.replace(raw, f"[[{int(year):4d}-{int(month):02d}-{int(day):02d}]]")
        with open(path, "w", encoding="utf8") as f:
            f.write(text)


def check_duplicate_file_names(file_df):
    """Print the paths of every note whose file name occurs more than once
    (Logseq flattens pages into one folder, so names must be unique)."""
    dup_mask = file_df["name"].duplicated(keep=False)
    dup_df = file_df[dup_mask].sort_values("name")
    print(dup_df["path"].tolist())


def main():
    """Run the full SiYuan-export cleanup pipeline over the note folder.

    The file listing is rebuilt with get_file_df after every step that
    deletes files, so later steps see a fresh view of the tree.
    """
    note_dir = "凝练的思考"
    os.makedirs(os.path.join(note_dir, "assets"), exist_ok=True)
    file_df = get_file_df(note_dir)
    page_remove_empty(file_df)
    file_df = get_file_df(note_dir)
    page_remove_note_title(file_df)
    file_df = get_file_df(note_dir)
    # NOTE(review): the title pass runs twice — presumably the second run
    # catches a leading blank line exposed by the first; confirm.
    page_remove_note_title(file_df)
    page_remove_bak(note_dir)
    file_df = get_file_df(note_dir)
    page_remove_empty(file_df)
    file_df = get_file_df(note_dir)
    # In-place content normalisation passes on the final listing.
    replace_marks(file_df)
    replace_numbered_list(file_df)
    remove_multi_empty_lines(file_df)

    # Image handling: local assets first, then remote downloads.
    replace_image_anchor(file_df)
    move_local_image(file_df, note_dir)
    move_online_image(note_dir, file_df)
    replace_nested_url(file_df)
    remove_image_pre_blank(file_df)
    replace_double_eq(file_df)

    # Flatten the folder tree into pages/ and report name clashes.
    page_structure_flatten(note_dir)
    check_duplicate_file_names(file_df)


# Entry point: run the cleanup pipeline only when executed as a script.
if __name__ == "__main__":
    main()

# move_pages(note_dir)
# add_property_title(note_dir)
