import json
import time
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
from tqdm import tqdm

def get_html_content(url, headers, timesleep=2):
    """Download the HTML text at *url*.

    Args:
        url: Page to fetch.
        headers: HTTP headers for the request (e.g. User-Agent).
        timesleep: Seconds to pause after a successful fetch, as a crude
            rate limit against the target site.

    Returns:
        The response body as text, or None when the request fails
        (network error, timeout, or 4xx/5xx status).
    """
    try:
        resp = requests.get(url, headers=headers, timeout=100)
        # Treat HTTP error statuses (4xx/5xx) the same as network failures.
        resp.raise_for_status()
    except requests.exceptions.RequestException as exc:
        print(f"Error fetching URL {url}: {exc}")
        return None
    # Only throttle after a successful request.
    time.sleep(timesleep)
    return resp.text

def parse_chapter_list(html_content, base_url='https://www.c2186a.sbs/'):
    """Parse the novel's index page into a chapter-name -> URL mapping.

    Args:
        html_content: HTML of the novel's index page.
        base_url: Site root used to resolve relative chapter hrefs.
            Defaults to the previously hard-coded host for backward
            compatibility.

    Returns:
        dict mapping chapter title to absolute chapter URL; empty when the
        expected markup (a second <dl> holding <dd><a> entries) is missing.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    chap_dict = {}
    try:
        # The second <dl> on the page holds the full chapter list — TODO
        # confirm this still matches the site's markup.
        chapter_dl = soup.find_all('dl')[1]
        for tag in chapter_dl.find_all('dd'):
            # Skip the "expand all chapters" placeholder entry.
            if tag.a and tag.a.string != '<<---展开全部章节--->>':
                # urljoin avoids the duplicate-slash result that plain string
                # concatenation produces when href already starts with '/'.
                chap_dict[tag.a.string] = urljoin(base_url, tag.a['href'])
    except IndexError:
        print("Could not find the expected chapter list structure.")
    return chap_dict

def parse_novel_metadata(html_content, chap_url):
    """Extract author and description from the novel's index page.

    Args:
        html_content: HTML of the novel's main (index) page.
        chap_url: URL of that page, echoed back under "index_href".

    Returns:
        dict with keys "author", "desc" (either may be None when the
        corresponding element is absent) and "index_href".
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    info = {"author": None, "desc": None, "index_href": chap_url}
    try:
        # Author lives in a <span> inside <div class="small"> — presumably
        # site-specific markup; verify if the site layout changes.
        small_div = soup.find("div", {"class": 'small'})
        if small_div and small_div.span:
            info["author"] = small_div.span.string

        # Description is the first <dd> of the <dl> inside <div class="intro">.
        intro_div = soup.find("div", {"class": "intro"})
        if intro_div and intro_div.dl and intro_div.dl.dd:
            info["desc"] = intro_div.dl.dd.string
    except AttributeError:
        print("Could not find expected metadata elements.")
    return info

def parse_chapter_content(html_content):
    """Extract the body text from a single chapter page.

    Args:
        html_content: HTML of one chapter page.

    Returns:
        The chapter text with ideographic spaces (U+3000) stripped out,
        or "" when the content container is missing or empty.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    text = ""
    try:
        container = soup.find(id='chaptercontent')
        # Truthiness (not `is not None`) matches bs4 semantics: an empty
        # tag is falsy, and the original treated it as "no content".
        if container:
            text = container.get_text().replace('\u3000', '')
    except AttributeError:
        print("Could not find the chapter content div.")
    return text

def save_novel_data(data, filename):
    """Serialize the extracted novel data to a UTF-8 JSON file.

    Args:
        data: JSON-serializable object (the assembled novel dict).
        filename: Destination path of the JSON file.

    Side effects:
        Writes/overwrites *filename*; prints a status line on success
        or an error message on IOError (does not re-raise).
    """
    try:
        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=4)
        # Bug fix: the messages previously printed the literal text
        # "(unknown)" instead of interpolating the filename.
        print(f"Novel data saved successfully to {filename}")
    except IOError as e:
        print(f"Error saving file {filename}: {e}")

def main(chap_url, filename, max_chapters=20, chapter_delay=60):
    """Orchestrate the scrape: fetch index, download chapters, save JSON.

    Args:
        chap_url: URL of the novel's index (chapter list) page.
        filename: Path of the JSON file to write.
        max_chapters: Maximum number of chapters to download. Defaults to
            20, the previously hard-coded limit.
        chapter_delay: Seconds to sleep after each chapter fetch, to be
            polite to the server. Defaults to 60, as before.

    Side effects:
        Network requests, console output, and a JSON file written to disk.
    """
    header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36'
    }

    print(f"Fetching main chapter page from: {chap_url}")
    resp_chap_text = get_html_content(chap_url, header)
    if not resp_chap_text:
        # Fetch error already reported by get_html_content.
        return

    chap_dict = parse_chapter_list(resp_chap_text)
    if not chap_dict:
        print("No chapters found. Exiting.")
        return

    novel_metadata = parse_novel_metadata(resp_chap_text, chap_url)
    novel_dict = {
        "author": novel_metadata["author"],
        "desc": novel_metadata["desc"],
        "index_href": novel_metadata["index_href"],
        "content": [],
    }

    print("Starting to download chapter content...")
    chapters_processed = 0
    for chap_name, chap_href in tqdm(chap_dict.items(), desc="Downloading chapters"):
        if chapters_processed >= max_chapters:
            print(f"Reached {max_chapters} chapters limit. Stopping download.")
            break

        chapter_content_text = get_html_content(chap_href, header, timesleep=chapter_delay)
        if chapter_content_text:
            content = parse_chapter_content(chapter_content_text)
            # Chapters are stored in fetch order as one-entry dicts.
            novel_dict["content"].append({chap_name: content})
            chapters_processed += 1
        else:
            print(f"Skipping chapter {chap_name} due to fetch error.")

    save_novel_data(novel_dict, filename)

if __name__ == '__main__':
    # Entry point: scrape this novel's index page and dump it to JSON.
    main(
        chap_url="https://m.3b6aa4560.sbs/book/82506/",
        filename="机武风暴.json",
    )