import requests
from pathlib import Path
from tqdm import tqdm, trange
from furl import furl
from datetime import datetime
from pytz import timezone

from queue import Queue
from concurrent.futures import ThreadPoolExecutor as Pool


def _from_bookstack_datetime(dt):
    return datetime.fromisoformat(dt.rstrip("Z")).replace(tzinfo=timezone("Etc/GMT0"))


def _fromtimestamp_with_tz(timestamp):
    """Convert a POSIX timestamp (e.g. a file's ``st_mtime``) into an aware
    ``datetime`` in Asia/Shanghai.

    The original built a *naive* local datetime first and then called
    ``astimezone``; re-deriving the UTC offset from naive wall-clock time is
    ambiguous during a DST fold of the system's local zone.  Passing the
    target tz directly to ``fromtimestamp`` converts the epoch value in one
    unambiguous step (pytz zones implement ``fromutc``, so this path is
    correctly normalized).

    :param timestamp: seconds since the epoch.
    :return: aware ``datetime`` in the Asia/Shanghai zone.
    """
    return datetime.fromtimestamp(timestamp, timezone("Asia/Shanghai"))


class BookStackConn:
    """Minimal BookStack REST-API client that syncs books with a local
    directory tree of Markdown files.

    Local layout convention: ``<book>/<chapter>/<page>.md`` for pages that
    belong to a chapter and ``<book>/<page>.md`` for chapterless pages.
    """

    def __init__(self, baseurl, token_ID, token_value, thread_count):
        """
        :param baseurl: root URL of the BookStack instance
        :param token_ID: API token id
        :param token_value: API token secret
        :param thread_count: worker threads used for concurrent page downloads
        """
        # BookStack token auth header: "Authorization: Token <id>:<secret>"
        self.HEADERs = {"Authorization": "Token %s:%s" % (token_ID, token_value)}
        self.baseurl = baseurl
        self.thread_count = thread_count

    # ============ upload ============
    def _upload_book(self, book_info, book_dir: Path):
        """Upload one local book directory into the remote book described by
        ``book_info`` (the JSON returned by POST /api/books).

        First-level sub-directories become chapters; loose ``*.md`` files
        become chapterless pages.
        """
        book_id, book_name = book_info["id"], book_info["name"]
        chapter_dirs = sorted(
            (p for p in book_dir.glob("*") if p.is_dir()), key=lambda p: p.stem
        )
        pages_no_chapter = sorted(
            (p for p in book_dir.glob("*.md") if not p.is_dir()),
            key=lambda p: p.stem,
        )
        pages_with_chapter = sorted(
            (p for d in chapter_dirs for p in d.glob("*.md")), key=lambda p: p.stem
        )
        page_count = len(pages_no_chapter) + len(pages_with_chapter)
        # ====== chapters ======
        # Maps chapter name -> remote chapter id.  NOTE(review): assumes
        # chapter names are unique within a book; duplicates would collide.
        chapter_infos = {}
        with tqdm(chapter_dirs, book_name + "-章", ascii=True) as chapter_bar:
            for chapter_dir in chapter_bar:
                chapter_response = requests.post(
                    furl(self.baseurl, path="api/chapters"),
                    headers=self.HEADERs,
                    json={"book_id": book_id, "name": chapter_dir.stem},
                )
                chapter_info = chapter_response.json()
                chapter_infos[chapter_info["name"]] = chapter_info["id"]
        # ====== pages ======
        # The endpoint URL is loop-invariant; build it once.
        pages_endpoint = furl(self.baseurl, path="api/pages")
        with trange(page_count, desc=book_name + "-页", ascii=True) as page_bar:
            for file in pages_with_chapter:
                requests.post(
                    pages_endpoint,
                    headers=self.HEADERs,
                    json={
                        "chapter_id": chapter_infos[file.parent.stem],
                        "name": file.stem,
                        "markdown": file.read_text("utf-8"),
                    },
                )
                page_bar.update()
            for file in pages_no_chapter:
                requests.post(
                    pages_endpoint,
                    headers=self.HEADERs,
                    json={
                        "book_id": book_id,
                        "name": file.stem,
                        "markdown": file.read_text("utf-8"),
                    },
                )
                page_bar.update()

    def upload_books(self, uploaddir: Path, book_names=()):
        """Create and upload books from sub-directories of ``uploaddir``.

        Assumes none of these books exist on the server yet.  With an empty
        ``book_names`` every sub-directory is uploaded; otherwise only the
        named ones.
        """
        if book_names:
            wanted = set(book_names)
            book_dirs = sorted(
                (p for p in uploaddir.glob("*") if p.stem in wanted and p.is_dir()),
                key=lambda p: p.stem,
            )
        else:
            book_dirs = sorted(
                (p for p in uploaddir.glob("*") if p.is_dir()),
                key=lambda p: p.stem,
            )
        with tqdm(book_dirs, "所有书籍", ascii=True, leave=False) as bar:
            for book_dir in bar:
                book_response = requests.post(
                    furl(self.baseurl, path="api/books"),
                    headers=self.HEADERs,
                    json={"name": book_dir.stem},
                )
                self._upload_book(book_response.json(), book_dir)

    # ============ download ============
    def _download_book(self, book_info, book_dir: Path):
        """Download new or updated pages of one remote book into ``book_dir``.

        Pages whose local file exists and is at least as new as the remote
        ``updated_at`` are skipped.
        """
        # ====== fetch the book's table of contents ======
        page_list_response = requests.get(
            furl(self.baseurl, path="api/books/%s" % book_info["id"]),
            headers=self.HEADERs,
        )
        page_list_data = page_list_response.json()["contents"]
        # ====== index pages: id -> {name, remote mtime, local target path} ======
        page_infos = {}
        chapters = set()
        for object_info in page_list_data:
            if object_info.get("type") == "chapter":
                chapters.add(object_info["name"])
                for page_info in object_info["pages"]:
                    page_infos[page_info["id"]] = {
                        "name": page_info["name"],
                        "mtime": _from_bookstack_datetime(page_info["updated_at"]),
                        "path": book_dir.joinpath(
                            object_info["name"], page_info["name"]
                        ).with_suffix(".md"),
                    }
            else:
                page_infos[object_info["id"]] = {
                    "name": object_info["name"],
                    "mtime": _from_bookstack_datetime(object_info["updated_at"]),
                    "path": book_dir.joinpath(object_info["name"]).with_suffix(".md"),
                }
        # Drop pages whose local copy already exists and is up to date.
        page_infos = {
            page_id: pi
            for page_id, pi in page_infos.items()
            if (
                not pi["path"].exists()
                or _fromtimestamp_with_tz(pi["path"].stat().st_mtime) < pi["mtime"]
            )
        }
        page_count = len(page_infos)
        # ====== fetch page contents concurrently ======
        with Pool(self.thread_count) as pool:
            results = Queue(page_count)

            def fetch_page(pgid):
                # Full page object, including its markdown source.
                return requests.get(
                    furl(self.baseurl, path="api/pages/%s" % pgid),
                    headers=self.HEADERs,
                ).json()

            def enqueue_result(task):
                # BUGFIX: always enqueue *something*.  The original callback
                # called task.result() directly, so a failed request raised
                # inside the callback, nothing was enqueued, and the consumer
                # loop below blocked on Queue.get() forever.
                exc = task.exception()
                results.put(task.result() if exc is None else exc)

            for page_id in page_infos:
                pool.submit(fetch_page, page_id).add_done_callback(enqueue_result)
            # ====== create chapter directories locally ======
            for chapter_name in chapters:
                book_dir.joinpath(chapter_name).mkdir(exist_ok=True)
            # ====== write page files as downloads complete ======
            for _ in trange(
                page_count,
                desc=book_info["name"],
                unit="页",
                disable=page_count <= 1,
                ascii=True,
                leave=False,
            ):
                outcome = results.get()
                if isinstance(outcome, BaseException):
                    raise outcome  # surface the failed download instead of hanging
                target = page_infos[outcome["id"]]
                target["path"].write_text(outcome["markdown"], "utf-8")

    def download_books(self, savedir: Path, book_names=()):
        """Download books into ``savedir``.

        Assumes book names are unique on the server.  An empty ``book_names``
        downloads every book.

        NOTE(review): only the first page of GET /api/books is consumed;
        instances with more books than the API page size would need
        pagination — confirm against the server's page size.
        """
        book_list_response = requests.get(
            furl(self.baseurl, path="api/books"), headers=self.HEADERs
        )
        book_list_data = book_list_response.json()["data"]
        if book_names:
            wanted = set(book_names)
            book_infos = sorted(
                (b for b in book_list_data if b["name"] in wanted),
                key=lambda b: b["name"],
            )
        else:
            book_infos = book_list_data
        with tqdm(book_infos, "所有书籍", ascii=True, unit="书") as bar:
            for book_info in bar:
                book_dir = savedir.joinpath(book_info["name"])
                book_dir.mkdir(parents=True, exist_ok=True)
                self._download_book(book_info, book_dir)
