# main.py
from spiders.db.dbmysql import get_session, engine
from spiders.novelCrawler.novel_scraper import scrape_novel_detail
from spiders.novelCrawler.chapter_scraper import scrape_chapter_content
from spiders.novelCrawler.content_scraper import scrape_chapter_content
from sqlalchemy import Table, MetaData



def main():
    """Run one crawl cycle: fetch the scheduled task, then scrape the
    novel's detail page, its chapter list, and every chapter body.

    Side effects only (network + database writes); returns ``None``.
    Exits quietly when the scheduler has no pending task.
    """
    # NOTE(review): `get_schedule_info` is never imported in this file and
    # will raise NameError at runtime — import it from the scheduling
    # module (not visible from here; confirm its location).
    task = get_schedule_info()
    if not task:
        # No scheduled work — nothing to do this cycle.
        return

    novel_url = task.novel_url
    chapter_list_url = task.chapter_list_url

    # 1. Scrape the novel detail page. The original assigned `novel_url`
    # but never used it and called the scraper with no target; pass the
    # URL through so the scraper knows which novel to fetch.
    # TODO(review): confirm scrape_novel_detail accepts a URL argument.
    scrape_novel_detail(novel_url)

    # 2. Scrape the chapter list.
    # NOTE(review): chapter_scraper and content_scraper both export a
    # function named `scrape_chapter_content`; the second import shadows
    # the first, so this call resolves to content_scraper. The list
    # scraper likely has a different name — verify against chapter_scraper
    # and import it under a distinct name.
    scrape_chapter_content(chapter_list_url)

    # 3. Scrape every chapter body. Reflect the `chapters` table via
    # autoload_with=engine alone: MetaData(bind=...) was deprecated in
    # SQLAlchemy 1.4 and removed in 2.0.
    chapter_table = Table("chapters", MetaData(), autoload_with=engine)

    # The original referenced an undefined `session`; create one from the
    # imported factory and guarantee it is released even if a scrape fails.
    session = get_session()
    try:
        for chapter in session.query(chapter_table).all():
            scrape_chapter_content(chapter.url)
    finally:
        session.close()


# Entry point: run a single crawl cycle only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()
