# chapter_scraper.py
from importlib import metadata

import requests
from bs4 import BeautifulSoup
from sqlalchemy import MetaData, Table

from spiders.db.dbmysql import session
from spiders.novelCrawler.novel_scraper import dynamic_execution


# Scrape one chapter page: extract the chapter title and body text.
def scrape_chapter_content(index_num, html_content, task):
    """Parse a single chapter's HTML and extract its title and content.

    Args:
        index_num: position of the chapter in the book index (currently
            unused; kept for caller compatibility).
        html_content: raw HTML of the chapter page.
        task: crawl-task object carrying the extraction expressions
            (``index_name_pattern`` and ``content_start``).

    Returns:
        An ``(index_name, content)`` tuple so callers can persist the
        result. (Previously both values were computed and then discarded.)
    """
    soup = BeautifulSoup(html_content, 'html.parser')

    # 1. Extract the chapter title.
    # NOTE(review): dynamic_execution evaluates a task-supplied code
    # string — ensure task patterns come only from a trusted source.
    index_name_code = "index_name =" + task.index_name_pattern
    index_name = dynamic_execution(soup, 'index_name', index_name_code)
    print("获取章节名称:", index_name)

    # 2. Extract the chapter body.
    content_code = "content =" + task.content_start
    content = dynamic_execution(soup, 'content', content_code)

    # TODO: filter the chapter content, check the word count / whether the
    # chapter is paywalled, and persist via save_chapters_to_db.
    return index_name, content


# Persist scraped chapter rows to the database.
def save_chapters_to_db(chapter_list):
    """Insert chapter rows into the ``chapters`` table in one batch.

    Args:
        chapter_list: iterable of dicts mapping column names to values.

    Bug fixed: the original reflected the table against ``metadata``
    imported from :mod:`importlib` (a module, not a SQLAlchemy
    ``MetaData``) and referenced an undefined ``engine`` name, so it
    raised on every call.
    """
    chapter_list = list(chapter_list)
    if not chapter_list:
        return  # nothing to insert; skip the reflection round-trip

    # Reuse the session's engine for table reflection.
    engine = session.get_bind()
    chapter_table = Table('chapters', MetaData(), autoload_with=engine)

    # One executemany-style insert instead of a statement per row.
    session.execute(chapter_table.insert(), chapter_list)
    session.commit()
