import requests
from bs4 import BeautifulSoup
import json
from tqdm import tqdm
import time


class NovelScraper:
    """Scraper for downloading a web novel chapter by chapter.

    Workflow: fetch the novel's index page, extract per-chapter links and
    basic metadata (author, description), download each chapter's text,
    and persist everything to a JSON file.
    """

    def __init__(self, headers=None, delay=2):
        """
        Initialize the scraper.

        :param headers: Optional HTTP request headers; when omitted, a
            default desktop Chrome User-Agent is used so the site does
            not reject the request as a bot.
        :param delay: Default pause in seconds after each request,
            to avoid hammering the server.
        """
        self.headers = headers or {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36'
        }
        self.delay = delay

    def get_html_content(self, url, timesleep=None):
        """
        Fetch the HTML content of a URL, then sleep to throttle requests.

        :param url: Target URL.
        :param timesleep: Per-call delay in seconds; overrides the
            instance default when not None.
        :return: Response body as text, or None on any request error.
        """
        sleep_time = timesleep if timesleep is not None else self.delay
        try:
            response = requests.get(url, headers=self.headers, timeout=100)
            response.raise_for_status()
            # Throttle only after a successful fetch; failures return fast.
            time.sleep(sleep_time)
            return response.text
        except requests.exceptions.RequestException as e:
            print(f"Error fetching URL {url}: {e}")
            return None

    def parse_chapter_list(self, html_content):
        """
        Parse the index page and extract chapter names with their URLs.

        :param html_content: HTML of the novel's index page.
        :return: Dict mapping chapter name -> absolute chapter URL;
            empty if the expected structure is missing.
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        chap_dict = {}
        try:
            # The second <dl> on the page holds the chapter list; the
            # first is assumed to be something else (e.g. latest updates).
            chapter_dl = soup.find_all('dl')[1]
            for tag in chapter_dl.find_all('dd'):
                # Skip the "expand all chapters" placeholder link.
                if tag.a and tag.a.string != '<<---展开全部章节--->>':
                    chap_name = tag.a.string
                    # Build an absolute URL from the relative href.
                    href = 'https://www.c2186a.sbs/' + tag.a['href']
                    chap_dict[chap_name] = href
        except (IndexError, KeyError) as e:
            print(f"Could not find the expected chapter list structure: {e}")
        return chap_dict

    def parse_novel_metadata(self, html_content, chap_url):
        """
        Parse the index page for novel metadata (author, description).

        :param html_content: HTML of the novel's index page.
        :param chap_url: The index page URL (recorded in the output).
        :return: Dict with keys "author", "desc" (either may be None
            when not found) and "index_href".
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        author = None
        desc = None
        try:
            author_tag = soup.find("div", {"class": 'small'})
            if author_tag and author_tag.span:
                author = author_tag.span.string

            desc_tag = soup.find("div", {"class": "intro"})
            if desc_tag and desc_tag.dl and desc_tag.dl.dd:
                desc = desc_tag.dl.dd.string
        except AttributeError as e:
            print(f"Could not find expected metadata elements: {e}")

        return {
            "author": author,
            "desc": desc,
            "index_href": chap_url,
        }

    def parse_chapter_content(self, html_content):
        """
        Parse a chapter page and extract the body text.

        :param html_content: HTML of a single chapter page.
        :return: Chapter text with ideographic spaces (U+3000) removed;
            empty string when the content element is absent.
        """
        soup = BeautifulSoup(html_content, 'html.parser')
        content = ""
        try:
            content_div = soup.find(id='chaptercontent')
            if content_div:
                # Strip full-width spaces used for paragraph indentation.
                content = content_div.get_text().replace('\u3000', '')
        except AttributeError as e:
            print(f"Could not find the chapter content div: {e}")
        return content

    def save_novel_data(self, data, filename):
        """
        Save the scraped novel data to a JSON file (UTF-8, pretty-printed).

        :param data: JSON-serializable novel data.
        :param filename: Output file path.
        """
        try:
            with open(filename, 'w', encoding='utf-8') as f:
                json.dump(data, f, ensure_ascii=False, indent=4)
            # Bug fix: messages previously printed the literal text
            # "(unknown)" instead of interpolating the actual filename.
            print(f"Novel data saved successfully to {filename}")
        except IOError as e:
            print(f"Error saving file {filename}: {e}")

    def scrape_novel(self, chap_url, filename, chapter_limit=20):
        """
        Run the full scraping pipeline: index -> chapters -> JSON file.

        :param chap_url: URL of the novel's index (chapter list) page.
        :param filename: Output JSON file path.
        :param chapter_limit: Maximum number of chapters to download.
        """
        print(f"Fetching main chapter page from: {chap_url}")
        resp_chap_text = self.get_html_content(chap_url)
        if not resp_chap_text:
            return

        chap_dict = self.parse_chapter_list(resp_chap_text)
        if not chap_dict:
            print("No chapters found. Exiting.")
            return

        novel_metadata = self.parse_novel_metadata(resp_chap_text, chap_url)
        novel_dict = {
            "author": novel_metadata["author"],
            "desc": novel_metadata["desc"],
            "index_href": novel_metadata["index_href"],
            "content": []
        }

        print("Starting to download chapter content...")
        chapters_processed = 0
        for chap_name, chap_href in tqdm(chap_dict.items(), desc="Downloading chapters"):
            if chapters_processed >= chapter_limit:
                print(f"Reached {chapter_limit} chapters limit. Stopping download.")
                break

            # NOTE(review): a 60-second pause per chapter is very
            # conservative — presumably to dodge rate limiting; confirm.
            chapter_content_text = self.get_html_content(chap_href, timesleep=60)
            if chapter_content_text:
                content = self.parse_chapter_content(chapter_content_text)
                novel_dict["content"].append({chap_name: content})
                chapters_processed += 1
            else:
                # Failed fetches are skipped, not retried.
                print(f"Skipping chapter {chap_name} due to fetch error.")

        self.save_novel_data(novel_dict, filename)


def main():
    """Entry point: scrape the configured novel and save it to JSON."""
    # Build a scraper with default settings and run the full pipeline.
    scraper = NovelScraper()
    scraper.scrape_novel(
        chap_url="https://m.3b6aa4560.sbs/book/82506/",
        filename="机武风暴.json",
    )


# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()