# BY @burpheart
# https://www.yuque.com/burpheart/phpaudit
# https://github.com/burpheart
import sys

import requests
import json
import re
import os
import urllib.parse

# NOTE(review): `tset` is never read or written anywhere in this file —
# looks like a leftover; confirm before removing.
tset = []

# Browser-mimicking request headers sent with every Yuque request.
# NOTE(review): the Cookie line embeds a captured login session
# (`_yuque_session`, `yuque_ctoken`, ...) — presumably required for
# password-protected/private books and it WILL expire; refresh it from a
# logged-in browser when downloads start failing.
headers = {
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
    "Accept-Language": "en,zh-CN;q=0.9,zh-TW;q=0.8,zh;q=0.7,en-US;q=0.6",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
    "Pragma": "no-cache",
    "Sec-Fetch-Dest": "document",
    "Sec-Fetch-Mode": "navigate",
    "Sec-Fetch-Site": "none",
    "Sec-Fetch-User": "?1",
    "Service-Worker-Navigation-Preload": "true",
    "Upgrade-Insecure-Requests": "1",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36",
    "sec-ch-ua": 'Google Chrome";v="129", "Not=A?Brand";v="8", "Chromium";v="129"',
    "sec-ch-ua-mobile": "?0",
    "sec-ch-ua-platform": '"Windows"',
    "Cookie": "receive-cookie-deprecation=1; lang=zh-cn; _yuque_session=cQPbTtJC4-wwB5FweH5l9U4JkNPPj2PbXuAWDqskRBg-zwMLQ_L33jWita_4kNyx19sDnW1ANgLsK4FvFx7tIw==; _uab_collina=172716449083535635631734; tfstk=gQZnMRjl-yuB0KYsNOoCrbau0rb9ApiSR7K-w0hP7fl69BKLUg-ze-4Kz6iLq5V_1WeLeu46q-w7v9iRdDwIV05AMiFudJiWDJzvskvwQx2w4UlPD0VG-J1AMiIT7ua8CsU-k0QySYGZLYurzOYZdf8EL7lE7Ckmh0lza7laQxMHTplraFRZ1Yora7orCvKriur_b6MKwF5jSBqsK2c4LXy8al8IJewts3-zjP0nggcM43riKRTxYStcyjzj9qhY_GKnqRku9fFV0hmZorw-EWjHYDUa5kiLXsvj-7cKul3GudVmx704YVvy2JmoacDa0TLj6lcLak0NHn0-bo3qYPQ194nnn7rQtLxzgReYAVZliCl7JxiZHJsHjmyEUgWe7EWf83MNel8WP2ksIjILfm1_U1nzwOXMlwgECvCAIOYWP2ksIjBGIEOI8AMdM; aliyungf_tc=cb376b743aed3c058bf5c54c6d057e61b75f6cd0299d029891b6f7728802a335; yuque_ctoken=VLZgrbrlXTsFVFbEc-8e0mKR; current_theme=default; acw_tc=ac11000117274238913888445eaa020148d69eef32947570ded4aeb7d0e6d2"
}


def parse_cookies(cookies_str):
    """Parse a raw ``Cookie`` header string into a name -> value dict.

    Bug fix: the original dict comprehension iterated the ';'-separated
    items but split the *whole* string on '=' each time (``for _ in ...``),
    so it always produced a single bogus entry. Each item is now split on
    its own first '=' so values containing '=' (e.g. base64 session
    tokens) stay intact.

    Args:
        cookies_str: Raw cookie header, e.g. ``"a=1; b=2"``.

    Returns:
        dict mapping cookie names to values. An item without '=' maps to
        itself (mirrors the original ``split('=', 1)[-1]`` fallback).
    """
    cookies = {}
    for item in cookies_str.split(';'):
        item = item.strip()
        if not item:
            continue
        name, sep, value = item.partition('=')
        # partition() yields value == '' when '=' is absent; fall back to
        # the item itself, as split('=', 1)[-1] would have done.
        cookies[name] = value if sep else name
    return cookies


def save_page(book_id, sulg, path):
    """Download one Yuque document as markdown source and write it to *path*.

    Args:
        book_id: Numeric book id, as a string.
        sulg: Document slug (original author's spelling of "slug"); kept
            for interface compatibility.
        path: Destination ``.md`` file path.

    Returns early (writing nothing) when the API does not answer 200 —
    e.g. the page was deleted or the session cookie expired.
    """
    url = f'https://www.yuque.com/api/docs/{sulg}?book_id={book_id}&merge_dynamic_data=false&mode=markdown'
    print(url)
    docsdata = requests.get(url, headers=headers)
    if docsdata.status_code != 200:
        # "Document download failed — the page may have been deleted"
        print("文档下载失败 页面可能被删除 ", book_id, sulg, docsdata.content)
        return
    docsjson = json.loads(docsdata.content)
    # `with` closes the handle even if the write raises (the original
    # open/close pair leaked the handle on error).
    with open(path, 'w', encoding='utf-8') as f:
        f.write(docsjson['data']['sourcecode'])


def get_book(url="https://www.yuque.com/bitcatmeng/lpcqha"):
    # 有密码的时候需要指定header
    docsdata = requests.get(url, headers=headers)
    data = re.findall(r"decodeURIComponent\(\"(.+)\"\)\);", docsdata.content.decode('utf-8'))
    docsjson = json.loads(urllib.parse.unquote(data[0]))
    test = []
    list = {}
    temp = {}
    md = ""
    table = str.maketrans('\/:*?"<>|' + "\n\r", "___________")
    prename = ""
    if (os.path.exists("download/" + str(docsjson['book']['id'])) == False):
        os.makedirs("download/" + str(docsjson['book']['id']))

    for doc in docsjson['book']['toc']:
        if (doc['type'] == 'TITLE' or doc['child_uuid'] != ''):
            filename = ''
            list[doc['uuid']] = {'0': doc['title'], '1': doc['parent_uuid']}
            uuid = doc['uuid']
            temp[doc['uuid']] = ''
            while True:
                if (list[uuid]['1'] != ''):
                    if temp[doc['uuid']] == '':
                        temp[doc['uuid']] = doc['title'].translate(table)
                    else:
                        temp[doc['uuid']] = list[uuid]['0'].translate(table) + '/' + temp[doc['uuid']]
                    uuid = list[uuid]['1']
                else:
                    temp[doc['uuid']] = list[uuid]['0'].translate(table) + '/' + temp[doc['uuid']]
                    break
            if ((os.path.exists("download/" + str(docsjson['book']['id']) + '/' + temp[doc['uuid']])) == False):
                os.makedirs("download/" + str(docsjson['book']['id']) + '/' + temp[doc['uuid']])
            if (temp[doc['uuid']].endswith("/")):
                md += "## " + temp[doc['uuid']][:-1] + "\n"
            else:
                md += "  " * (temp[doc['uuid']].count("/") - 1) + "* " + temp[doc['uuid']][
                                                                         temp[doc['uuid']].rfind("/") + 1:] + "\n"
        if (doc['url'] != ''):
            if doc['parent_uuid'] != "":
                if (temp[doc['parent_uuid']].endswith("/")):
                    md += " " * temp[doc['parent_uuid']].count("/") + "* [" + doc['title'] + "](" + urllib.parse.quote(
                        temp[doc['parent_uuid']] + "/" + doc['title'].translate(table) + '.md') + ")" + "\n"
                else:
                    md += "  " * temp[doc['parent_uuid']].count("/") + "* [" + doc['title'] + "](" + urllib.parse.quote(
                        temp[doc['parent_uuid']] + "/" + doc['title'].translate(table) + '.md') + ")" + "\n"
                save_page(str(docsjson['book']['id']), doc['url'],
                          "download/" + str(docsjson['book']['id']) + '/' + temp[doc['parent_uuid']] + "/" + doc[
                              'title'].translate(table) + '.md')
            else:
                md += " " + "* [" + doc['title'] + "](" + urllib.parse.quote(
                    doc['title'].translate(table) + '.md') + ")" + "\n"
                save_page(str(docsjson['book']['id']), doc['url'],
                          "download/" + str(docsjson['book']['id']) + "/" + doc[
                              'title'].translate(table) + '.md')
    f = open("download/" + str(docsjson['book']['id']) + '/' + "/SUMMARY.md", 'w', encoding='utf-8')
    f.write(md)
    f.close()


if __name__ == '__main__':
    # Command-line usage: python main.py [URL]
    # Books without a password download directly; protected ones need the
    # logged-in browser Header/Cookie copied into `headers` above.
    args = sys.argv[1:]
    if args:
        get_book(args[0])
    else:
        get_book()
