import os
import re
import time
from functools import partial
from typing import (
    Any,
    Dict,
    List,
    Tuple,
    Optional,
)

import requests
from prettytable import PrettyTable

import utils.enum as enum
from settings import const
from utils.reader import read_yaml
from settings.config import Config
from utils.handler import (
    make_dir,
    remove_dir,
)
from utils.writer import (
    write_text,
    write_bytes,
)
from utils.getter import (
    get_table,
    build_tree,
    get_safe_path,
    get_format_datetime,
)


class YuQue:
    """
    YuQue (语雀) knowledge-base exporter.

    On construction it walks the authenticated user's account via the
    YuQue HTTP API: user info -> repository list -> each repository's
    TOC -> every document. The TOC is mirrored as a directory tree under
    ``Config.EXPORT_DIR``; ``download_all_doc()`` then writes each
    lake-format document as Markdown and downloads its images.

    ID glossary:
    - user_id           ->      user/id             ->      user ID
    - repo_id           ->      repo/id             ->      repository (knowledge base) ID
    - doc_id            ->      doc/id              ->      document ID
    """

    # Headers sent with every API request; the token authenticates the user.
    HEADER: Dict[str, str] = {
        "User-Agent": "Any",
        "X-Auth-Token": Config.TOKEN,
        "Content-Type": "application/json",
    }

    # requests.get pre-bound with the auth headers.
    request_get: partial = partial(requests.get, headers=HEADER)
    # Datetime formatter pre-bound with the project-wide format string.
    get_datetime: partial = partial(get_format_datetime, fmt=Config.DATETIME_FORMAT)

    def __init__(self) -> None:
        """Fetch all metadata and prepare the local export directory tree."""

        self.start_time: float = time.time()
        # Common leading columns shared by all three report tables.
        _pfx: Tuple[str, str, str] = ("序号", "ID", "名称")

        # Create the export root directory
        make_dir(Config.EXPORT_DIR)

        # User data (filled by _get_user_info)
        self.user_id: Optional[str] = None
        self.user_table: PrettyTable = get_table(
            title="用户信息",
            headers=(*_pfx, "知识库", "公开知识库", "介绍", "头像链接", "创建时间", "更新时间"),
        )

        # Repository data (filled by _get_repo_list)
        self.repo_list: List[dict] = []
        self.repo_table: PrettyTable = get_table(
            title="知识库",
            headers=(*_pfx, "文档", "喜欢", "订阅", "介绍", "公开", "是否导出", "创建时间", "更新时间"),
        )

        # Document data, keyed by document ID (filled by _parse_toc / _get_all_doc)
        self.doc_id_2_info: Dict[int, dict] = {}
        self.doc_table: PrettyTable = get_table(
            title="文档",
            headers=(*_pfx, "知识库", "格式", "包含图片", "公开", "发布", "点赞", "评论", "创建时间", "更新时间"),
        )

        # Fetch user info
        self._get_user_info()
        # Fetch the repository list
        self._get_repo_list()
        # Fetch each repository's detail (TOC) and build local directories
        self._get_repo_detail()
        # Fetch every document's detail
        self._get_all_doc()

    def _parse_toc(
            self,
            toc_list: List[dict],
            toc_root: dict,
    ) -> List[dict]:
        """
        Parse a repository TOC into a flat node list rooted at *toc_root*.

        Each TOC entry carries:
            - type (node kind), compared against enum.TocType.*.value:
                - META:     metadata (skipped — currently unused)
                - DOC:      document
                - TITLE:    directory/heading
                - REPO:     repository (the synthetic root)
            - uuid (node ID)
            - parent_uuid (parent node ID; defaults to the root's uuid)
            - title (display name, sanitised for use as a path component)
            - level (depth)

        Document nodes are also registered in ``self.doc_id_2_info``.

        :param toc_list:    raw TOC entries from the API
        :param toc_root:    synthetic root node describing the repository
        :return:            flat list of parsed nodes, root first
        """

        full_toc_list: List[dict] = [toc_root]

        for i in toc_list:
            # Skip metadata entries (nothing consumes them yet)
            if i["type"] == enum.TocType.META.value:
                continue

            data: dict = {
                "type": i["type"],
                "uuid": i["uuid"],
                # Sanitise the title so it is safe as a filesystem name
                "title": get_safe_path(str(i["title"])),
                "level": i["level"],
                "id": i.get("id") or None,
                # Top-level entries have no parent: attach them to the root
                "parent_uuid": i.get("parent_uuid") or toc_root["uuid"],
                "repo_id": toc_root["id"],
            }

            full_toc_list.append(data)

            if i["type"] == enum.TocType.DOC.value:
                self.doc_id_2_info.update({data["id"]: data})

        return full_toc_list

    def _make_dirs(
            self,
            data_list: List[dict],
            name_field: str,
            c_path: Optional[str] = None,
    ) -> None:
        """
        Recursively create the directory tree for a list of TOC nodes.

        :param data_list:   TOC nodes (each may carry a "children" list)
        :param name_field:  key holding the directory name (e.g. "title")
        :param c_path:      parent directory; ``None`` means the export root
        :return:            None
        """

        for node in data_list:
            name: str = node[name_field]
            children: List[dict] = node.get("children") or []
            base: str = Config.EXPORT_DIR if c_path is None else c_path
            path: str = os.path.join(base, name)

            # Each node gets a folder named after its (sanitised) title
            make_dir(path)

            # Document nodes additionally get a <title>.assets folder for
            # images; if the document turns out to have none, the folder is
            # removed later in download_all_doc.
            if node["type"] == enum.TocType.DOC.value:
                doc_id: int = node["id"]
                assets_path: str = os.path.join(path, f"{node['title']}{const.ASSETS_DIR_SUFFIX}")
                make_dir(assets_path)

                # Remember where this document lives on disk
                if doc_id in self.doc_id_2_info:
                    self.doc_id_2_info[doc_id].update({"path": path})

            if children:
                self._make_dirs(children, name_field, path)

    @staticmethod
    def to_local_image_src(
            body: str,
            title: str,
    ) -> Tuple[str, List[Dict[str, str]]]:
        """
        Rewrite YuQue CDN image links in *body* to local relative paths.

        Every markdown image pointing at ``cdn.nlark.com/yuque`` is rewritten
        to ``<title>.assets/<filename>`` and its parsed components are
        collected so the caller can download the originals.

        :param body:    document markdown body
        :param title:   document title (already path-safe)
        :return:        (rewritten body, list of image info dicts with keys
                        img_name / img_src / slug / filename)
        """

        # NOTE: extension class is [a-zA-Z] — the previous [a-zA-z] range also
        # matched the punctuation characters between 'Z' and 'a'.
        pattern: str = (r"!\[(?P<img_name>.*?)\]"
                        r"\((?P<img_src>https:\/\/cdn\.nlark\.com\/yuque.*\/(?P<slug>\d+)"
                        r"\/(?P<filename>.*?\.[a-zA-Z]+)).*\)")
        assets_dir_name: str = f"{title}{const.ASSETS_DIR_SUFFIX}"
        images: List[Dict[str, str]] = [m.groupdict() for m in re.finditer(pattern, body)]
        # Use a callable replacement so characters in the title can never be
        # misread as re.sub replacement escapes (e.g. a stray backslash).
        new_body: str = re.sub(
            pattern,
            lambda m: f"![]({assets_dir_name}/{m.group('filename')})",
            body,
        )

        return new_body, images

    def _get_user_info(self) -> None:
        """Fetch the authenticated user's profile and record it in the user table."""

        url: str = const.USER_INFO
        data: dict = self.request_get(url=url).json()["data"]

        self.user_id = data["id"]
        self.user_table.add_row([
            1,
            data["id"],
            data["name"],
            data["books_count"],
            data["public_books_count"],
            data["description"],
            data["avatar_url"],
            self.get_datetime(data["created_at"]),
            self.get_datetime(data["updated_at"]),
        ])

    def _get_repo_list(self) -> None:
        """Fetch the user's repositories; queue the non-ignored ones for export."""

        url: str = const.REPO_LIST.format(user_id=self.user_id)
        data_list: List[dict] = self.request_get(url=url).json()["data"]

        for idx, data in enumerate(data_list, 1):
            # Repositories listed in Config.IGNORE_REPOS are reported but not exported
            need_export: bool = data["name"] not in Config.IGNORE_REPOS

            self.repo_table.add_row((
                idx,
                data["id"],
                data["name"],
                data["items_count"],
                data["likes_count"],
                data["watches_count"],
                data["description"],
                enum.PublicLevel.v_l_mapping[data["public"]],
                need_export,
                self.get_datetime(data["created_at"]),
                self.get_datetime(data["updated_at"]),
            ))

            # Skip repositories that should not be exported
            if not need_export:
                continue

            self.repo_list.append({
                "id": data["id"],
                "name": data["name"],
                "slug": data["slug"],
                "namespace": data["namespace"],
            })

    def _get_repo_detail(self) -> None:
        """Fetch each repository's TOC, parse it and create the local directories."""

        for repo in self.repo_list:
            url: str = const.REPO_DETAIL.format(repo_id=repo["id"])
            data: dict = self.request_get(url=url).json()["data"]

            # The TOC is delivered as a YAML string
            yaml_string: str = data["toc_yml"]
            raw_toc_list: List[dict] = read_yaml(yaml_string)

            # Synthetic root node: the repository itself
            toc_root: dict = {
                "level": -1,
                "id": repo["id"],
                "uuid": repo["slug"],
                "title": repo["name"],
                "type": enum.TocType.REPO.value,
            }

            # Parse the TOC into a flat node list
            toc_list: List[dict] = self._parse_toc(raw_toc_list, toc_root)

            # Build the directory tree (uuid -> parent_uuid links)
            tree_data: dict = {}
            build_tree(tree_data, toc_list, "uuid", "parent_uuid")

            # Create the folders on disk, depth-first
            self._make_dirs([tree_data], "title")

    def _get_doc_detail(
            self,
            repo_id: int,
            doc_id: int,
    ) -> Dict[str, Any]:
        """
        Fetch one document's detail and normalise its markdown body.

        :param repo_id: repository ID
        :param doc_id:  document ID
        :return:        normalised document fields
        """

        # Small delay to stay polite with the API rate limit
        time.sleep(0.02)
        url: str = const.DOC_DETAIL.format(repo_id=repo_id, doc_id=doc_id)
        data: dict = self.request_get(url=url).json()["data"]

        body: str = data.get("body") or ""
        # Strip YuQue's anchor tags (non-greedy so multiple anchors on one
        # line don't swallow the text between them)
        body = re.sub(r'<a name=".*?"></a>', "", body)
        body = re.sub("\x00", "", body)  # -> remove invisible \x00 characters
        body = re.sub("\x05", "", body)  # -> remove invisible \x05 characters
        # Replace the <br /> YuQue emits around images with real newlines
        body = re.sub(r'\<br \/\>!\[image.png\]', "\n![image.png]", body)
        body = re.sub(r'\)\<br \/\>', ")\n", body)

        doc_dic: dict = {
            "body": body,
            "name": data["title"],
            "format": data["format"],
            "public": data["public"],
            "status": data["status"],
            "word_count": data["word_count"],
            "repo_name": data["book"]["name"],
            # "cover" is used as a has-images flag (0/1)
            "cover": int(bool(data["cover"])),
            "likes_count": data["likes_count"],
            "comments_count": data["comments_count"],
            "create_time": self.get_datetime(data["created_at"]),
            "update_time": self.get_datetime(data["updated_at"]),
        }

        print(f"正在下载文档：{url}")

        return doc_dic

    def _get_all_doc(self) -> None:
        """Fetch every registered document's detail and fill the document table."""

        for idx, (doc_id, data) in enumerate(self.doc_id_2_info.items(), 1):
            doc_data: dict = self._get_doc_detail(data["repo_id"], data["id"])
            # Merge the fetched detail into the TOC-derived record
            data.update(doc_data)
            self.doc_table.add_row((
                idx,
                data["id"],
                data["name"],
                data["repo_name"],
                enum.DocFormat.v_l_mapping[data["format"]],
                bool(data["cover"]),
                enum.PublicLevel.v_l_mapping[data["public"]],
                enum.PostStatus.v_l_mapping[data["status"]],
                data["likes_count"],
                data["comments_count"],
                data["create_time"],
                data["update_time"],
            ))

    @staticmethod
    def _download_image(
            image_info: dict,
            save_path: str,
    ) -> None:
        """
        Download one image to the local assets directory.

        :param image_info:  parsed image info (needs "img_src" and "filename")
        :param save_path:   directory to save the image into
        :return:            None
        """

        img_src, filename = map(image_info.get, ("img_src", "filename"))
        img_save_path: str = os.path.join(save_path, filename)
        # No auth headers needed: CDN URLs are public
        img_file: bytes = requests.get(img_src).content
        write_bytes(img_save_path, img_file)
        print(f"正在下载图片：{img_src}")

    def download_all_doc(self) -> None:
        """Write every lake-format document to disk (with images) and print the report tables."""

        print("\n\n数据解析完成 开始下载文档")

        for doc_id, doc in self.doc_id_2_info.items():
            doc_path: str = str(doc["path"])

            # Only lake-format documents are downloaded
            # (excluded: boards, sheets, tables, repositories)
            if doc["format"] == enum.DocFormat.LAKE.value:
                body: str = str(doc["body"])
                title: str = str(doc["title"])
                has_image: bool = bool(doc["cover"])
                assets_path: str = os.path.join(doc_path, f"{title}{const.ASSETS_DIR_SUFFIX}")
                md_path: str = os.path.join(doc_path, f"{title}.md")

                if has_image:
                    # Rewrite image links to local paths, then fetch the files
                    body, image_list = self.to_local_image_src(body, title)
                    # Images are saved as: doc_path/<doc_title>.assets/<filename>

                    for image_info in image_list:
                        self._download_image(image_info, assets_path)

                # No images: drop the (pre-created) assets folder
                else:
                    remove_dir(assets_path)

                # Write the markdown file
                write_text(md_path, body)

            # Non-lake documents: remove their pre-created folder
            else:
                remove_dir(doc_path)

        print(f"\n\n下载完成！总耗时：{time.time() - self.start_time}")

        # Print the report tables
        print(self.user_table)
        print(self.repo_table)
        print(self.doc_table)
