#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2024/12/29 14:26
# @Author : George
"""
https://www.gushiwen.cn/guwen/Default.aspx?p=1&type=%e5%b0%8f%e8%af%b4%e5%ae%b6%e7%b1%bb

第二层
https://www.gushiwen.cn/guwen/book_4e6b88d8a0bc.aspx
https://www.gushiwen.cn/guwen/book_a09880163008.aspx

第三层
https://www.gushiwen.cn/guwen/bookv_b630af160f65.aspx
"""
import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from typing import Dict, List, Tuple

import chardet
import requests
from bs4 import BeautifulSoup
from lxml import etree

class NovelDownloader:
    """Scrape classical-Chinese novels from gushiwen.cn into plain-text files.

    Workflow: the listing page yields book pages, each book page yields
    chapter pages, and each chapter page is scraped for its paragraphs.
    """

    def __init__(self):
        # All downloaded books are written under this directory.
        self.output_dir = "./novels"
        os.makedirs(self.output_dir, exist_ok=True)

        self.headers = {
            "Referer": "https://www.gushiwen.cn/guwen/Default.aspx?p=1",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36"
        }
        self.base_url = "https://www.gushiwen.cn"
        self.home_url = f"{self.base_url}/guwen/Default.aspx?p=1&type=%e5%b0%8f%e8%af%b4%e5%ae%b6%e7%b1%bb"

        # Reuse one session so connections to the single target host are
        # pooled instead of re-established per request.
        self.session = requests.Session()
        self.session.headers.update(self.headers)
        # Seconds before a hung request is abandoned; without a timeout a
        # stalled server would block the whole download forever.
        self.timeout = 15

    def get_html_tree(self, url: str) -> etree._Element:
        """Fetch *url* and return the parsed lxml HTML tree.

        Raises:
            requests.HTTPError: if the server answers with an error status.
        """
        response = self.session.get(url, timeout=self.timeout)
        # More precise than raising a bare Exception on status != 200.
        response.raise_for_status()

        # The site does not reliably declare its charset, so sniff it from
        # the raw bytes before decoding.
        response.encoding = chardet.detect(response.content)['encoding']
        return etree.HTML(response.text)

    def get_chapter_details(self, start: int = 1, stop: int = 3) -> Dict[str, Dict[str, str]]:
        """Collect chapter title -> chapter URL maps for the listed books.

        Args:
            start: index of the first book on the listing page to include.
            stop: index one past the last book to include. The defaults
                reproduce the original hard-coded ``[1:3]`` selection.

        Returns:
            Mapping of book title to ``{chapter title: chapter URL}``.
        """
        home_tree = self.get_html_tree(self.home_url)

        # Book links and titles from the category listing page.
        urls = home_tree.xpath("//*[@class='sonspic']/div[1]/p[1]/a[1]/@href")[start:stop]
        titles = home_tree.xpath("//*[@class='sonspic']/div[1]/p[1]/a[1]/b/text()")[start:stop]

        book_details = {}
        for title, url in zip(titles, urls):
            detail_tree = self.get_html_tree(f"{self.base_url}{url}")

            # Chapter links are relative; prefix the site root.
            chapter_urls = [f"{self.base_url}{href}" for href in
                            detail_tree.xpath("//*[@class='bookcont']/ul/span/a/@href")]
            chapter_titles = detail_tree.xpath("//*[@class='bookcont']/ul/span/a/text()")

            book_details[title] = dict(zip(chapter_titles, chapter_urls))

        return book_details

    def download_novel(self, title: str, chapters: Dict[str, str]):
        """Download every chapter of one book into ``<output_dir>/<title>.txt``.

        Args:
            title: book title, used as the output file name.
            chapters: mapping of chapter title to chapter page URL.
        """
        output_path = os.path.join(self.output_dir, f"{title}.txt")
        print(f"开始下载 {title}".center(100, "="))

        with open(output_path, "w", encoding="utf-8") as f:
            for chapter_title, chapter_url in chapters.items():
                response = self.session.get(chapter_url, timeout=self.timeout)
                response.raise_for_status()
                soup = BeautifulSoup(response.text, "lxml")

                f.write(f"{chapter_title}:\n")
                # Chapter body paragraphs live under the .contson container.
                for paragraph in soup.select('.contson > p'):
                    f.write(f"{paragraph.text}\n")
                f.write("\n\n")

                print(f"{chapter_title} 下载完成".center(20, "-"))

        print(f"{title} 下载完成".center(100, "="))

def main():
    """Download the selected books concurrently and report the elapsed time.

    Failures inside worker threads are surfaced per book instead of being
    silently dropped (the original never called ``future.result()``, so any
    exception raised in ``download_novel`` vanished).
    """
    start_time = time.time()

    downloader = NovelDownloader()
    books = downloader.get_chapter_details()

    # Fan the per-book downloads out over a thread pool (I/O-bound work).
    with ThreadPoolExecutor(max_workers=10) as pool:
        future_to_title = {
            pool.submit(downloader.download_novel, title, chapters): title
            for title, chapters in books.items()
        }
        for future in as_completed(future_to_title):
            title = future_to_title[future]
            try:
                future.result()  # re-raise any exception from the worker
            except Exception as exc:
                # Report and continue so one failed book does not abort the rest.
                print(f"{title} 下载失败: {exc}")

    print(f"总耗时: {time.time() - start_time:.2f}秒")

if __name__ == '__main__':
    main()
