import requests
from bs4 import BeautifulSoup
import os
import re
from urllib.parse import urljoin


# Deduplication set: module-level shared state that get_book_links()
# fills with absolute book-page URLs; returned to callers as-is.
book_urls = set()

# Browser-like User-Agent header sent with every request so the site
# does not reject the scraper as an obvious bot.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}


# Collect every book link from the ranking page (deduplicated).
def get_book_links():
    """Scrape the ranking page and collect unique book URLs.

    Returns:
        set[str]: absolute book-page URLs (the module-level ``book_urls``
        set, which this function mutates).
    """
    base_url = 'https://www.biquge.com.cn/'
    response = requests.get(base_url + 'paihangbang/', headers=headers)
    # NOTE(review): this site commonly serves GBK pages; trust the detected
    # encoding so parsed link text is not mojibake — confirm against a
    # live response.
    response.encoding = response.apparent_encoding
    soup = BeautifulSoup(response.text, 'html.parser')
    # BUG FIX: the original selector '.l mr10 a' meant "an <mr10> tag that
    # is a descendant of class 'l'", which matches nothing. The list
    # container carries BOTH classes, so the compound selector '.l.mr10 a'
    # is required.
    for link in soup.select('.l.mr10 a'):
        href = link.get('href')
        if not href:
            # Skip anchors without an href (e.g. placeholder <a> tags).
            continue
        book_urls.add(urljoin(base_url, href))
    return book_urls


# Fetch a book's index page: its title plus the chapter list.
def get_chapter_list(book_url):
    """Return ``(title, chapters)`` for the book at *book_url*.

    ``chapters`` is a list of ``(chapter name, absolute chapter URL)``
    tuples in page order.
    """
    page = requests.get(book_url, headers=headers)
    parsed = BeautifulSoup(page.text, 'html.parser')
    book_title = parsed.select_one('h1').text
    chapters = []
    for anchor in parsed.select('#list dl dd a'):
        chapters.append((anchor.text, urljoin(book_url, anchor['href'])))
    return book_title, chapters


# Download and clean a single chapter's text.
def download_chapter(chapter_url, chapter_name):
    """Fetch one chapter page and return its cleaned body text.

    Args:
        chapter_url: absolute URL of the chapter page.
        chapter_name: chapter title; unused by the body but kept so the
            call signature stays compatible with existing callers.

    Returns:
        str: chapter text with every whitespace run collapsed to a single
        newline and the ends trimmed, or ``''`` when the page has no
        ``#content`` container.
    """
    response = requests.get(chapter_url, headers=headers)
    soup = BeautifulSoup(response.text, 'html.parser')
    node = soup.select_one('#content')
    # BUG FIX: select_one returns None when the page lacks a #content
    # element (error page, layout change); the original then crashed with
    # AttributeError. Degrade to an empty chapter instead.
    if node is None:
        return ''
    # Collapse runs of spaces/blank lines into single newlines to get one
    # paragraph per line.
    return re.sub(r'\s+', '\n', node.text).strip()


# 主函数
def main():
    book_links = get_book_links()
    for book_link in book_links:
        title, chapter_list = get_chapter_list(book_link)
        book_folder = os.path.join(os.getcwd(), title)
        if not os.path.exists(book_folder):
            os.makedirs(book_folder)
        for chapter_name, chapter_url in chapter_list:
            chapter_content = download_chapter(chapter_url, chapter_name)
            chapter_file = os.path.join(book_folder, chapter_name + '.txt')
            with open(chapter_file, 'w', encoding='utf-8') as f:
                f.write(chapter_content)


if __name__ == "__main__":
    main()
