import requests
from bs4 import BeautifulSoup
import json
from tqdm import tqdm


# Keep each function focused on a single task and avoid duplicated code:
# main() only orchestrates the two steps of the scrape.
def main():
    """Entry point: fetch the chapter index, then download chapters to JSON.

    Relies on the module-level ``chap_url`` and ``filename`` settings defined
    in the ``__main__`` guard below.
    """
    chapters, meta = get_chap(chap_url=chap_url)
    download_content(chap_dict=chapters, novel_dict=meta, filename=filename)

def get_chap(chap_url):
    """Scrape a novel's chapter-index page.

    Args:
        chap_url: URL of the chapter index page
            (e.g. ``https://www.a897756d.cfd/book/260609/``).

    Returns:
        A ``(chap_dict, content_dict)`` tuple:
        * ``chap_dict``   — maps chapter title to its absolute URL.
        * ``content_dict`` — metadata: ``author``, ``desc``, ``index_href``
          (the index URL) and ``num`` (the book id parsed from the URL).

    Raises:
        requests.HTTPError: if the index page cannot be fetched.
    """
    resp_chap = requests.get(chap_url, headers=header)
    # Bug fix: the original compared the Response OBJECT to the int 200
    # (`resp_chap == 200`), which is always False, so the success branch
    # was unreachable and the function returned None. Check the status
    # properly and fail loudly on HTTP errors instead of printing.
    resp_chap.raise_for_status()

    # Explicit parser avoids bs4's GuessedAtParserWarning and keeps the
    # result stable across environments.
    soup_chap = BeautifulSoup(resp_chap.text, "html.parser")

    # The second <dl> on the page holds the full chapter list; each <dd>
    # wraps one chapter link. Skip the "expand all chapters" pseudo-entry.
    chap_dict = {}
    for tag in soup_chap.find_all('dl')[1].find_all('dd'):
        if tag.a.string != '<<---展开全部章节--->>':
            # NOTE(review): assumes tag.a['href'] is site-relative; verify
            # the concatenation does not produce a double slash.
            chap_dict[tag.a.string] = 'https://www.a897756d.cfd/' + tag.a['href']

    author = soup_chap.find("div", {"class": 'small'}).span.string
    desc = soup_chap.find("div", {"class": "intro"}).dl.dd.string
    # Book id is the last path component of the index URL, e.g. ".../260609/".
    num = chap_url.split('/')[-2]
    content_dict = {
        "author": author,
        "desc": desc,
        "index_href": chap_url,
        "num": num,
    }
    return chap_dict, content_dict

def download_content(chap_dict, novel_dict, filename, max_chapters=20):
    """Download chapter bodies and dump the whole novel dict to a JSON file.

    Args:
        chap_dict: mapping of chapter title -> chapter URL (from ``get_chap``).
        novel_dict: metadata dict to enrich; chapter texts are appended under
            the ``"content"`` key (created here if absent).
        filename: path of the JSON file to write.
        max_chapters: stop after this many chapters (default 20, matching the
            original hard-coded limit).

    Returns:
        None. Side effect: writes ``filename``.
    """
    # Bug fix: the dict built by get_chap has no "content" key, so the
    # original `novel_dict["content"].append(...)` raised KeyError on the
    # very first chapter. Initialize the list before appending.
    novel_dict.setdefault("content", [])

    # Slicing the items up front replaces the manual counter + break.
    selected = list(chap_dict.items())[:max_chapters]
    for chap_name, chap_href in tqdm(selected, desc="当前正在进行下载小说"):
        resp_content = requests.get(chap_href, headers=header)
        soup_content = BeautifulSoup(resp_content.text, "html.parser")
        # Strip full-width ideographic spaces (U+3000) used for indentation.
        content = soup_content.find(id='chaptercontent').get_text().replace('\u3000', '')
        novel_dict["content"].append({chap_name: content})

    with open(filename, 'w', encoding='utf-8') as f:
        # ensure_ascii=False keeps the Chinese text readable in the file
        # instead of \uXXXX escapes; json.load reads both forms identically.
        json.dump(novel_dict, f, ensure_ascii=False)


if __name__ == '__main__':
    # Script configuration — defined at module level so the worker
    # functions above can read them as globals.
    filename = "寒霜千年.json"
    chap_url = "https://www.a897756d.cfd/book/260609/"
    # A browser-like User-Agent so the site serves the normal HTML pages.
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36'
    }
    main()