import json
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
from tqdm import tqdm



def _fetch_soup(url, headers):
    """GET *url* and return its parsed HTML.

    Raises requests.HTTPError for non-2xx responses (the original code
    silently assumed every response was 200).
    """
    resp = requests.get(url, headers=headers)
    resp.raise_for_status()
    # Explicit parser: omitting it triggers a bs4 warning and makes the
    # result depend on which parsers happen to be installed.
    return BeautifulSoup(resp.text, 'html.parser')


def _parse_chapters(soup, base_url):
    """Return {chapter name: absolute chapter URL} from the index-page soup.

    The "expand all chapters" placeholder entry is skipped. URLs are
    resolved against *base_url* with urljoin instead of hard-coding the
    site host, so relative and absolute hrefs both work.
    """
    chapters = {}
    # The second <dl> on the page holds the chapter list.
    for tag in soup.find_all('dl')[1].find_all('dd'):
        title = tag.a.string
        if title != '<<---展开全部章节--->>':
            chapters[title] = urljoin(base_url, tag.a['href'])
    return chapters


def main(chap_url, filename, max_chapters=20):
    """Download the first *max_chapters* chapters of a novel and save as JSON.

    Fetches the index page at *chap_url*, extracts author, description and
    the chapter list, then downloads each chapter body and writes everything
    to *filename* as a JSON document of the shape::

        {"author": ..., "desc": ..., "index_href": ...,
         "content": [{chapter_name: chapter_text}, ...]}

    :param chap_url: URL of the novel's index (table-of-contents) page.
    :param filename: output path for the JSON file.
    :param max_chapters: how many chapters to download (default 20,
        matching the original behaviour).
    :raises requests.HTTPError: if any page returns a non-2xx status.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36'
    }

    soup_chap = _fetch_soup(chap_url, headers)
    chap_dict = _parse_chapters(soup_chap, chap_url)

    novel_dict = {
        "author": soup_chap.find("div", {"class": 'small'}).span.string,
        "desc": soup_chap.find("div", {"class": "intro"}).dl.dd.string,
        "index_href": chap_url,
        "content": [],
    }

    # Slice first instead of counting inside the loop, so tqdm also shows
    # the correct total.
    wanted = list(chap_dict.items())[:max_chapters]
    for chap_name, chap_href in tqdm(wanted, desc="当前正在进行下载小说"):
        soup_content = _fetch_soup(chap_href, headers)
        # \u3000 is the full-width ideographic space used for indentation.
        content = soup_content.find(id='chaptercontent').get_text().replace('\u3000', '')
        novel_dict["content"].append({chap_name: content})

    with open(filename, 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps the Chinese text human-readable in the
        # utf-8 output file instead of \uXXXX escapes.
        json.dump(novel_dict, f, ensure_ascii=False)


if __name__ == '__main__':
    # Script entry point: download the novel 《寒霜千年》 and save it as JSON.
    main(
        chap_url="https://www.a897756d.cfd/book/260609/",
        filename="寒霜千年.json",
    )