import argparse
import sys
import requests
from bs4 import BeautifulSoup
import json
import os
import time
import random
from urllib.parse import urlparse, parse_qs


# Desktop-Chrome User-Agent so the site serves pages as it would to a browser.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}

# Create output directory
# Per-novel .txt files are written here by save_novel_as_text().
output_dir = "./scraped_novels"
os.makedirs(output_dir, exist_ok=True)


def scrape_chapter(url):
    """Fetch one chapter page and return ``(content, comments)``.

    ``content`` is the chapter body text ("" if not found); ``comments`` is
    the author's note text, or None when the note block is absent.
    """
    resp = requests.get(url, headers=HEADERS)
    resp.encoding = "gbk"  # chapter pages are GBK-encoded
    soup = BeautifulSoup(resp.text, "html.parser")

    content = ""
    comments = None

    try:
        # Author's notes block (marked "(Disfunctional)" upstream — the div
        # may never match; presumably a selector that needs revisiting).
        note = soup.find('div', class_='note_main')
        if note:
            comments = note.get_text(separator='\n', strip=True)

        # The main text sits in the copy-protected div.
        body = soup.find(
            'div', {'onselectstart': 'return false', 'oncopy': 'return false'})
        if body:
            # Strip embedded ads and non-text elements before extracting.
            for junk in body(['script', 'style', 'div', 'a', 'img']):
                junk.decompose()
            content = body.get_text(separator='\n', strip=True)

    except Exception as e:
        print(f"Error scraping chapter: {str(e)}")

    return content, comments


def get_novel_links(url):
    """Return detail-page URLs for novels with >= 20,000 words on one listing page."""
    resp = requests.get(url, headers=HEADERS)
    resp.encoding = 'utf-8'
    soup = BeautifulSoup(resp.text, "html.parser")

    links = []
    # The first <tr> is the table header — skip it.
    for tr in soup.select("tbody tr")[1:]:
        # Word count is in the 5th column.
        wc_cell = tr.select_one("td:nth-of-type(5)")
        try:
            words = int(wc_cell.text.strip())
        except (AttributeError, ValueError):
            continue  # missing cell or non-numeric word count

        # Keep only novels with at least 20,000 words.
        if words < 20000:
            continue

        # The 2nd column (作品) holds the link to the novel page.
        anchor = tr.select_one("td:nth-of-type(2) a")
        if anchor and "onebook.php" in anchor["href"]:
            links.append(f"https://www.jjwxc.net/{anchor['href']}")

    return links


def get_novel_id(url):
    """Return the ``novelid`` query parameter of *url*, or ``'unknown'``."""
    query = urlparse(url).query
    ids = parse_qs(query).get('novelid')
    return ids[0] if ids else 'unknown'


def save_novel_as_text(novel_data):
    """Write one novel (metadata, summary, chapters) to a UTF-8 text file.

    The file is named ``<id>-<title>.txt`` under ``output_dir``.  Characters
    that are illegal in filenames (path separators, quotes, etc.) are replaced
    with ``_`` so ``open`` cannot fail on titles containing them.
    """
    # Sanitize the title into a single valid filename component.
    safe_title = "".join(
        '_' if ch in '\\/:*?"<>|' else ch for ch in novel_data['title'])
    # Single-quoted keys keep this f-string valid on Python < 3.12
    # (same-quote nesting is a SyntaxError there).
    filename = f"{output_dir}/{novel_data['id']}-{safe_title}.txt"

    with open(filename, 'w', encoding='utf-8') as f:
        # Write novel metadata
        f.write(f"作品: {novel_data.get('title', '')}\n")
        f.write(f"作者: {novel_data.get('author', '')}\n")
        f.write(f"类型: {novel_data.get('genre', '')}\n")
        f.write(f"进度: {novel_data.get('status', '')}\n")
        f.write(f"字数: {novel_data.get('word_count', 0)}\n")
        f.write(f"URL: {novel_data['url']}\n\n")

        # Summary Section
        f.write("============================ 文案 ============================\n")
        f.write(novel_data.get('summary', 'No synopsis available') + "\n\n")

        f.write(f"内容标签: {' '.join(novel_data.get('tags', []))}\n")
        f.write(f"主角: {', '.join(novel_data.get('characters', []))}\n")
        f.write(f"一句话简介: {novel_data.get('brief_intro', '')}\n")
        f.write(f"立意: {novel_data.get('themes', '')}\n\n")

        # Write chapters, each framed by a "=" banner roughly 60 chars wide.
        for chapter in novel_data.get('chapters', []):
            title = f" {chapter.get('number', '')}: {chapter.get('title', '')} "
            # Pad symmetrically; a title longer than 60 chars simply gets no
            # padding ("=" * negative is "").
            count = int((60 - len(title)) / 2)
            f.write("=" * count + title + "=" * count + "\n")
            f.write(f"URL: {chapter['url']}\n")
            f.write(f"字数: {chapter.get('word_count', 0)}\n")
            f.write("\n")
            f.write(chapter.get('content', '') + "\n")
            # Author's notes are optional per chapter.
            if chapter.get('comments', None):
                f.write("\n-- 作者有话说 --\n")
                f.write(chapter['comments'] + "\n")
            f.write("\n" + "=" * 60 + "\n\n")


def scrape_novel(url):
    """Scrape one novel's metadata and every readable chapter, then save it.

    Fetches the novel's index page, builds a metadata dict, walks the chapter
    table (skipping VIP and under-review chapters), downloads each chapter via
    scrape_chapter, and writes the result with save_novel_as_text.

    Returns the assembled novel dict.
    """
    print(f"Processing novel: {url}")
    response = requests.get(url, headers=HEADERS)
    response.encoding = "gbk"  # novel index pages are GBK-encoded
    soup = BeautifulSoup(response.text, "html.parser")

    # Novel-level metadata from the index page.
    novel = {
        "url": url,
        "id": get_novel_id(url),
        "title": soup.find("h1", itemprop="name").find("span").text.strip(),
        "author": soup.find("h2").find("a").text.strip(),
        "genre": soup.find("span", itemprop="genre").text.strip(),
        "brief_intro": "",  # 一句话简介 (filled below)
        "tags": [
            a.text for a in soup.select('div.smallreadbody span > a[style*="red"]')
        ],  # 内容标签
        "characters": [],  # 主角 (filled below)
        "themes": "",  # 立意 (filled below)
        "summary": soup.find("div", id="novelintro")
        .get_text(separator="\n")
        .strip(),  # 文案
        # NOTE: "updataStatus" is the site's own misspelled itemprop — keep it.
        "status": soup.find("span", itemprop="updataStatus").text.strip(),
        "word_count": soup.find("span", itemprop="wordCount").text.strip(),
        "collected_count": soup.find(
            "span", itemprop="collectedCount"
        ).text.strip(),
        "chapters": [],
        "latest_update": None,
    }

    # 一句话简介: text after the "一句话简介：" label.
    brief_span = soup.find(
        'span', string=lambda text: text and '一句话简介' in text)
    if brief_span:
        novel["brief_intro"] = brief_span.text.split('：', 1)[-1].strip()

    # Main characters (主角)
    characters = soup.find_all('div', class_='character_name')
    novel['characters'] = [c.text.strip()
                           for c in characters if c.text.strip()]

    # Themes (立意)
    themes_span = soup.find('span', string=lambda t: t and '立意' in t)
    if themes_span:
        novel['themes'] = themes_span.text.split('：', 1)[-1].strip()

    # Walk the chapter table rows.
    for row in soup.select('tr[itemprop~="chapter"]'):
        cols = row.find_all("td")

        # Update time lives in a <span> in the last column; guard against a
        # missing span instead of crashing on .text.
        time_span = cols[-1].find("span") if cols else None
        update_time = time_span.text.strip() if time_span else ""

        # Record the newest chapter's update time even for rows skipped below
        # (VIP chapters), matching the pre-guard check in the original flow.
        is_latest = "newestChapter" in row["itemprop"]
        if is_latest:
            novel["latest_update"] = update_time

        if len(cols) < 6:
            continue  # Skip VIP chapters (their rows have fewer columns)

        link = cols[1].find("a", itemprop="url")
        if not link:
            continue  # Skip 等待进入网审 chapters (no public link yet)

        chapter = {
            "number": cols[0].text.strip(),
            "title": cols[1].get_text(separator=" ", strip=True),
            "url": link["href"],
            "preview": cols[2].text.strip(),
            "word_count": cols[3].text.strip(),
            "update_time": update_time,
            "is_latest": is_latest,
        }

        try:
            print(
                f"Scraping {novel['title']} 第{chapter['number']}章 {chapter['title']}...")
            # Keep the author's notes too: save_novel_as_text writes
            # chapter["comments"], but the old code discarded them.
            chapter["content"], chapter["comments"] = scrape_chapter(
                chapter["url"])
            novel["chapters"].append(chapter)
            time.sleep(1)  # Rate-limiting
        except Exception as e:
            print(f"Error scraping chapter {chapter['url']}: {str(e)}")
            continue

    # Save immediately after scraping novel
    save_novel_as_text(novel)
    print(f"Scraped {novel['title']}")

    return novel


def parse_arguments():
    """Build the CLI parser and parse sys.argv.

    Returns an argparse.Namespace with ``url`` (base listing URL) and
    ``pages`` (number of listing pages to walk, default 10).
    """
    cli = argparse.ArgumentParser(
        description='晋江文学城 Novel Scraper',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    cli.add_argument('--url', type=str, help='Base URL for novel lists')
    cli.add_argument('--pages', type=int, default=10,
                     help='Number of pages to scrape')
    return cli.parse_args()


def main(base_url, pages=10):
    """Walk listing pages 1..pages of *base_url*, scraping every novel found.

    Each novel is saved to disk as it is scraped; the aggregate run is also
    dumped to ``novels.json`` at the end.
    """
    collected = []
    for page in range(1, pages + 1):
        # Append the current page number to the base listing URL.
        page_url = f"{base_url}&page={page}"

        try:
            print(f"Scraping page {page}/{pages}...")
            links = get_novel_links(page_url)
            print(links)
            for link in links:
                try:
                    collected.append(scrape_novel(link))
                    # Random delay between novels to stay polite.
                    time.sleep(random.uniform(1, 3))
                except Exception as e:
                    print(f"Error processing novel {link}: {str(e)}")
                    continue
        except Exception as e:
            print(f"Error scraping page {page}: {str(e)}")
            continue

    # Persist the aggregate run as JSON alongside the per-novel text files.
    with open("novels.json", "w", encoding="utf-8") as f:
        json.dump(collected, f, ensure_ascii=False, indent=2)

    print("Scraping completed. Data saved to ./scraped_novels and ./novels.json")


if __name__ == "__main__":
    # Entry point: parse CLI flags, then run until finished or Ctrl-C.
    cli_args = parse_arguments()
    try:
        main(cli_args.url, cli_args.pages)
    except KeyboardInterrupt:
        print("\nScraping interrupted by user")
        sys.exit(1)
