# content_scraper.py
from importlib import metadata

import requests
from bs4 import BeautifulSoup
from sqlalchemy import MetaData, Table

from spiders.db.dbmysql import session


# Check whether a chapter has already been scraped
def is_chapter_scraped(chapter_url):
    """Return True if a row with this URL already exists in the `chapters` table.

    Note: the table is reflected from the database on every call; reflection
    could be cached at module level if this becomes a hot path.
    """
    # Original code referenced an undefined `engine` and passed the
    # importlib.metadata *module* to Table(); reflect against the session's
    # bound engine with a fresh MetaData collection instead.
    chapter_table = Table('chapters', MetaData(), autoload_with=session.get_bind())
    exists_clause = session.query(chapter_table).filter_by(url=chapter_url).exists()
    return session.query(exists_clause).scalar()


# Scrape a chapter's content and store it in the database
def scrape_chapter_content(chapter_url):
    """Fetch `chapter_url`, extract the chapter body text, and persist it.

    Skips URLs already recorded in the `chapters` table. Raises
    `requests.HTTPError` on a non-2xx response.
    """
    if is_chapter_scraped(chapter_url):
        print(f"Chapter already scraped: {chapter_url}")
        return

    # Bound the request so a hung server cannot stall the crawl forever.
    response = requests.get(chapter_url, timeout=30)
    # Fail loudly on HTTP errors instead of parsing an error page.
    response.raise_for_status()

    soup = BeautifulSoup(response.content, 'html.parser')
    content_div = soup.find('div', class_='chapter-content')
    if content_div is None:
        # Guard: original crashed with AttributeError when the div was absent.
        print(f"No chapter content found at: {chapter_url}")
        return

    update_chapter_content(chapter_url, content_div.text)


# Update the chapter content stored in the database
def update_chapter_content(chapter_url, content):
    """Persist `content` into the `chapters` row whose url matches `chapter_url`.

    Commits the session; a URL with no matching row is a silent no-op
    (UPDATE affects zero rows).
    """
    # Original code referenced an undefined `engine` and the importlib.metadata
    # module; reflect against the session's bound engine instead.
    chapter_table = Table('chapters', MetaData(), autoload_with=session.get_bind())
    stmt = (
        chapter_table.update()
        .where(chapter_table.c.url == chapter_url)
        .values(content=content)
    )
    session.execute(stmt)
    session.commit()
