import requests
from bs4 import BeautifulSoup
import json
from tqdm import tqdm
import time

class NovelScraper:
    """Scrape novel metadata and chapter contents from a novel site.

    Scraped data accumulates in ``self.novel_data`` and can be persisted
    with :meth:`save_to_json`.
    """
    def __init__(self, base_url, headers=None, timesleep=2):
        """Initialize the scraper.

        :param base_url: Base URL of the novel site, used to build absolute
            chapter links.
        :param headers: Optional request headers; defaults to a common
            browser User-Agent.
        :param timesleep: Seconds to pause after every successful request,
            to avoid hammering the server.
        """
        # Normalize so base_url always ends with exactly one '/'.
        self.base_url = base_url.rstrip('/') + '/'
        self.headers = headers if headers else {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36'
        }
        self.timesleep = timesleep
        # Accumulated scrape result; "content" is a list of
        # {chapter_name: chapter_text} dicts in download order.
        self.novel_data = {
            "author": None,
            "desc": None,
            "index_href": None,
            "content": []
        }
        self.chapters_parsed_count = 0  # number of chapters downloaded so far

    def _get_html_content(self, url):
        """Fetch *url* and return its HTML text, or ``None`` on any request error."""
        try:
            response = requests.get(url, headers=self.headers, timeout=10)
            response.raise_for_status()  # raise on HTTP error status codes
            # Throttle after each successful request to be polite to the server.
            time.sleep(self.timesleep)
            return response.text
        except requests.exceptions.RequestException as e:
            print(f"Error fetching URL {url}: {e}")
            return None

    def _parse_chapter_list(self, html_content):
        """Parse chapter names and links from the index page HTML.

        :return: dict mapping chapter name -> absolute chapter URL;
            empty if the expected page structure is missing.
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        chap_dict = {}
        try:
            # The chapter list is assumed to live in the second <dl> tag.
            chapter_dl = soup.find_all('dl')[1]
            for tag in chapter_dl.find_all('dd'):
                # Skip the "expand all chapters" placeholder link.
                if tag.a and tag.a.string != '<<---展开全部章节--->>':
                    chap_name = tag.a.string
                    # Build the absolute chapter URL from base_url + relative href.
                    href = self.base_url + tag.a['href'].lstrip('/')
                    chap_dict[chap_name] = href
        except IndexError:
            print("Warning: Could not find the expected chapter list structure.")
        return chap_dict

    def _parse_novel_metadata(self, html_content, index_href):
        """Extract author and description from the index page and store them
        (together with *index_href*) in ``self.novel_data``.
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        author = None
        desc = None
        try:
            # Author is expected inside <div class="small"><span>...</span>.
            author_tag = soup.find("div", {"class": 'small'})
            if author_tag and author_tag.span:
                author = author_tag.span.string

            # Description is expected inside <div class="intro"><dl><dd>...</dd>.
            desc_tag = soup.find("div", {"class": "intro"})
            if desc_tag and desc_tag.dl and desc_tag.dl.dd:
                desc = desc_tag.dl.dd.string
        except AttributeError:
            print("Warning: Could not find expected metadata elements.")

        self.novel_data["author"] = author
        self.novel_data["desc"] = desc
        self.novel_data["index_href"] = index_href

    def _parse_chapter_content(self, html_content):
        """Extract the chapter body text from a chapter page's HTML.

        :return: chapter text with ideographic spaces removed; "" if the
            content container is not found.
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        content = ""
        try:
            content_div = soup.find(id='chaptercontent')
            if content_div:
                # Strip full-width (ideographic) space characters.
                content = content_div.get_text().replace('\u3000', '')
        except AttributeError:
            print("Warning: Could not find the chapter content div.")
        return content

    def scrape_novel(self, chap_url, max_chapters=20):
        """Scrape metadata and up to *max_chapters* chapters.

        :param chap_url: URL of the chapter index page.
        :param max_chapters: Maximum number of chapters to download.
        :return: True if metadata and a chapter list were obtained,
            False on fetch failure or when no chapters were found.
        """
        print(f"Fetching main chapter page from: {chap_url}")
        main_page_html = self._get_html_content(chap_url)
        if not main_page_html:
            print("Failed to fetch main chapter page. Exiting.")
            return False

        self._parse_novel_metadata(main_page_html, chap_url)
        chap_dict = self._parse_chapter_list(main_page_html)

        if not chap_dict:
            print("No chapters found. Exiting.")
            return False

        print(f"Starting to download up to {max_chapters} chapter(s)...")
        for chap_name, chap_href in tqdm(chap_dict.items(), desc="Downloading chapters"):
            if self.chapters_parsed_count >= max_chapters:
                print(f"Reached {max_chapters} chapters limit. Stopping download.")
                break

            chapter_html = self._get_html_content(chap_href)
            if chapter_html:
                content = self._parse_chapter_content(chapter_html)
                self.novel_data["content"].append({chap_name: content})
                self.chapters_parsed_count += 1
            else:
                # A failed chapter fetch is skipped, not fatal.
                print(f"Skipping chapter '{chap_name}' due to fetch error.")
        return True

    def save_to_json(self, filename):
        """Save the scraped novel data as pretty-printed UTF-8 JSON.

        :param filename: Destination file path.
        """
        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(self.novel_data, f, ensure_ascii=False, indent=4)
            # Interpolate the actual filename (the original f-string had no placeholder).
            print(f"Novel data saved successfully to {filename}")
        except IOError as e:
            print(f"Error saving file {filename}: {e}")

# --- Usage example ---
if __name__ == '__main__':

    # Destination file for the scraped data.
    output_filename = "寒霜千年_oop.json"

    # Base site URL used to build absolute chapter links.
    # NOTE: adjust this per site — if chapter hrefs are already absolute
    # URLs, the base is not used for joining.
    base_site_url = "https://www.c2186a.sbs/"
    chapter_list_url = base_site_url + "book/260609/"

    # Build the scraper, fetch up to 4 chapters, and persist the result
    # only when the scrape reported success.
    scraper = NovelScraper(base_url=base_site_url)
    if scraper.scrape_novel(chap_url=chapter_list_url, max_chapters=4):
        scraper.save_to_json(output_filename)