# 通过豆瓣网爬出书籍信息

import os
import requests
from bs4 import BeautifulSoup


class BookSpider:
    """Book-information spider for Douban.

    Scrapes title, authors, release date, ISBN and table of contents from a
    Douban book page such as https://book.douban.com/subject/35291956/.

    Usage::

        book = BookSpider(url)
        book.draw()
        book.name ...
    """

    def __init__(self, url: str):
        self._url = url
        self._id = self.__parse_id(url)
        # Lazily set to True once draw() has populated the attributes.
        self.__has_data = False

    def __parse_id(self, url: str) -> str:
        """Extract the numeric subject id from the page URL.

        Handles URLs both with and without a trailing slash.
        """
        return url.removesuffix("/").split("/")[-1]

    def draw(self):
        """Fetch the page and populate name/authors/releaseDate/isbn/toc.

        Raises requests.HTTPError on a non-2xx response and
        requests.Timeout if the server stalls.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36",
            # SECURITY NOTE(review): this is a personal session cookie
            # committed to source. It should be rotated and loaded from an
            # environment variable or config file instead.
            "Cookie": """bid=n8WP-q5tfX0; _vwo_uuid_v2=DF6C8579DCD004C6FB7FE9AE629D54FB1|09a7a891b0cb5b9880d9aed24e3c2bb5; gr_user_id=108fbfc3-3ecf-4290-b195-4a9c0ceb2a05; _ga=GA1.1.2060409648.1683180095; _ga_RXNMP372GL=GS1.1.1683184708.2.0.1683184708.60.0.0; __yadk_uid=fvQNuSlfRdxY87G6IvkSYEDEnDgYxJ4C; ll="118281"; viewed="35960313_36196259_35011789_26869340_3892590_1792387_35934564_27202653_36336510_4780618"; push_noty_num=0; push_doumail_num=0; __utmv=30149280.15671; __utmz=30149280.1691403761.16.10.utmcsr=cn.bing.com|utmccn=(referral)|utmcmd=referral|utmcct=/; _pk_id.100001.3ac3=cb62af92928f413e.1675646803.; __gads=ID=f5c1b9a690eae1fd-22b65e8491d900f2:T=1675646865:RT=1691481906:S=ALNI_MbHotg83U0Kh3qJjaYafPd9Z_RzAQ; __gpi=UID=00000bb67beeb9a9:T=1675646865:RT=1691481906:S=ALNI_MauQj0LlEysXwiC0_aqIFhS1Ir84g; ap_v=0,6.0; __utma=30149280.2060409648.1683180095.1691481890.1692842718.18; __utmc=30149280; dbcl2="156715549:BhJGa6nKt/8"; ck=QtPz; __utma=81379588.2060409648.1683180095.1691481906.1692842868.17; __utmc=81379588; __utmz=81379588.1692842868.17.16.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/search; frodotk_db="11f14bcd8a33cb5c35d3e3689e9b6214"; _pk_ref.100001.3ac3=%5B%22%22%2C%22%22%2C1692842868%2C%22https%3A%2F%2Fwww.douban.com%2Fsearch%3Fcat%3D1001%26q%3Dreact%22%5D""",
        }
        # timeout: a stalled connection must not hang the process forever.
        # raise_for_status: surface HTTP errors immediately instead of
        # parsing an error page and failing later with an AttributeError.
        response = requests.get(self._url, headers=headers, timeout=30)
        response.raise_for_status()
        doc = BeautifulSoup(response.text, "html.parser")
        self.name = doc.find(attrs={"id": "wrapper"}).find("h1").text.strip()
        # Hoisted: the #info element is the anchor for all metadata fields.
        info = doc.find(attrs={"id": "info"})
        # Douban renders the author label with a leading space: " 作者".
        author_anchors = info.find("span", string=" 作者").parent.find_all("a")
        self.authors = [a.text for a in author_anchors]
        self.releaseDate = info.find("span", string="出版年:").next_sibling.text
        self.isbn = info.find("span", string="ISBN:").next_sibling.text
        self.toc = self.__get_toc(doc)
        self.__has_data = True

    def __get_dir_name(self) -> str:
        """Directory name for this book: the title with spaces removed."""
        if not self.__has_data:
            self.draw()
        return self.name.replace(" ", "")

    def save(self, dir):
        """Save the book info and table of contents under directory *dir*.

        Fetches the page first if draw() has not been called yet.
        """
        if not self.__has_data:
            self.draw()

        # Create the per-book output directory.
        path = os.path.join(dir, self.__get_dir_name())
        os.makedirs(path, exist_ok=True)

        self.__save_info(path)
        self.__save_toc(path)

    def __get_toc(self, document: "BeautifulSoup") -> str:
        """Extract and clean the full table of contents from the page."""
        # The full TOC element's id embeds the subject id.
        # (Renamed from `id` to avoid shadowing the builtin.)
        toc_id = f"dir_{self._id}_full"
        raw = document.find(attrs={"id": toc_id}).text
        return self.__parse_toc(raw)

    def __str__(self) -> str:
        items = [
            f"书名: {self.name}",
            f"作者: {' | '.join(self.authors)}",
            f"出版年: {self.releaseDate}",
            f"ISBN: {self.isbn}",
        ]
        return "\n".join(items)

    def __markdown(self) -> str:
        """Render the book info as a small Markdown document."""
        items = [
            f"# {self.name}",
            f"作者: {' | '.join(self.authors)}",
            f"出版年: {self.releaseDate}",
            f"ISBN: {self.isbn}",
        ]
        return "\n".join(items)

    def __save_info(self, path, filename="001-信息.md"):
        """Write the Markdown info file into *path*."""
        with open(os.path.join(path, filename), mode="w", encoding="utf-8") as f:
            f.write(self.__markdown())

    def __save_toc(self, path, filename="002-目录.md"):
        """Write the table-of-contents file into *path*."""
        with open(os.path.join(path, filename), mode="w", encoding="utf-8") as f:
            f.write(self.toc)

    def __parse_toc(self, toc: str) -> str:
        """Clean up the raw TOC text scraped from the page.

        Strips surrounding whitespace, converts full-width spaces to ASCII
        spaces, and drops the trailing "· · · · · ·     (收起)" footer line.
        """
        cleaned = []
        for line in toc.splitlines():
            # str.strip() already removes edge full-width spaces, so the
            # replace only affects interior ones; no second strip needed.
            line = line.strip().replace("\u3000", " ")
            if line.startswith("· · ·"):
                continue
            cleaned.append(line)
        return "\n".join(cleaned)


def test(url="https://book.douban.com/subject/35799353/", target="./csharp"):
    """Manual smoke test: scrape one book page and save it under *target*.

    The defaults preserve the original hard-coded values; both the page URL
    and the output directory can now be overridden by callers.

    NOTE(review): performs live network I/O against douban.com and writes
    files to disk — not suitable for automated test runs.
    """
    book = BookSpider(url)
    book.save(target)
    print(book)


test()
