import requests
from bs4 import BeautifulSoup
from sqlalchemy.orm import sessionmaker

from spiders.WebDriver.NovelScraper import NovelScraper
from spiders.db.book_service import get_book
from spiders.db.dbmysql import engine
import json

from spiders.db.models import CrawlSingleTask, CrawlSource
from spiders.novelCrawler.chapter_scraper import scrape_chapter_content
from spiders.novelCrawler.novel_scraper import scrape_novel_detail, save_novel_to_db


class SingleTask:
    """One single-book crawl task merged with its crawl source's rule set.

    The ``*_url`` / ``*_pattern`` / ``*_start`` / ``*_end`` attributes come
    from the source's crawl-rule JSON and describe how to locate data on the
    target site; the task-level attributes (``source_book_id``, ``cat_id``,
    ``book_name``, ``author_name``, ``book_id``) identify the specific book.
    """

    def __init__(self, update_book_list_url='', book_list_url='', cat_id_rule=None, book_status_rule=None,
                 book_id_pattern='', page_pattern='', total_page_pattern='', book_detail_url='',
                 book_name_pattern='', author_name_pattern='', pic_url_pattern='', status_pattern='',
                 score_pattern='', visit_count_pattern='', desc_start='', desc_end='',
                 update_time_pattern='', update_time_format_pattern='', book_index_url='',
                 index_id_pattern='', index_name_pattern='', book_content_url='', content_start='',
                 content_end='', pic_url_prefix='', book_index_start='', filter_content='',
                 source_book_id='', cat_id=None, book_name='', author_name='', book_id=None):
        # BUGFIX: `book_id` previously had no default value while following
        # defaulted parameters — a SyntaxError that prevented the module from
        # being imported at all. It now defaults to None; the only caller
        # (get_tasks_with_rules) passes it by keyword, so this is compatible.

        # URL of the "recently updated books" list page
        self.update_book_list_url = update_book_list_url
        # URL of the full book list page
        self.book_list_url = book_list_url
        # Mapping rule for category IDs (site value -> local cat_id); defaults to {}
        self.cat_id_rule = cat_id_rule or {}
        # Mapping rule for book status values (site value -> local status); defaults to {}
        self.book_status_rule = book_status_rule or {}
        # Regex pattern extracting the book ID
        self.book_id_pattern = book_id_pattern
        # Regex pattern extracting the current page number
        self.page_pattern = page_pattern
        # Regex pattern extracting the total page count
        self.total_page_pattern = total_page_pattern
        # URL template of the book detail page
        self.book_detail_url = book_detail_url
        # Regex pattern extracting the book name
        self.book_name_pattern = book_name_pattern
        # Regex pattern extracting the author name
        self.author_name_pattern = author_name_pattern
        # Regex pattern extracting the cover picture URL
        self.pic_url_pattern = pic_url_pattern
        # Regex pattern extracting the book status
        self.status_pattern = status_pattern
        # Regex pattern extracting the score
        self.score_pattern = score_pattern
        # Regex pattern extracting the visit count
        self.visit_count_pattern = visit_count_pattern
        # Marker where the description text starts
        self.desc_start = desc_start
        # Marker where the description text ends
        self.desc_end = desc_end
        # Regex pattern extracting the last-update time
        self.update_time_pattern = update_time_pattern
        # Format pattern used to parse the last-update time
        self.update_time_format_pattern = update_time_format_pattern
        # URL template of the book's chapter-index page
        self.book_index_url = book_index_url
        # Regex pattern extracting a chapter (index) ID
        self.index_id_pattern = index_id_pattern
        # Regex pattern extracting a chapter (index) name
        self.index_name_pattern = index_name_pattern
        # URL template of the chapter-content page
        self.book_content_url = book_content_url
        # Marker where the chapter content starts
        self.content_start = content_start
        # Marker where the chapter content ends
        self.content_end = content_end
        # Prefix prepended to relative picture URLs
        self.pic_url_prefix = pic_url_prefix
        # Marker where the chapter index listing starts
        self.book_index_start = book_index_start
        # Text fragments to strip from scraped content
        self.filter_content = filter_content

        # Task-level attributes identifying the specific book to crawl
        # Book ID on the source site
        self.source_book_id = source_book_id
        # Local category ID
        self.cat_id = cat_id
        # Book name
        self.book_name = book_name
        # Author name
        self.author_name = author_name
        # Local book ID (None when the book has not been stored yet)
        self.book_id = book_id


def get_tasks_with_rules():
    """Load every single-book crawl task in status 2 with its source's rules.

    For each pending ``CrawlSingleTask`` the associated ``CrawlSource`` is
    looked up, its ``crawl_rule`` JSON is parsed, and both are merged into a
    :class:`SingleTask`. Tasks whose source cannot be found are reported and
    skipped. Returns a (possibly empty) list of ``SingleTask`` objects; on a
    query error an empty list is returned.
    """
    session = sessionmaker(bind=engine)()
    try:
        pending = session.query(CrawlSingleTask).filter_by(task_status=2).all()
        collected = []

        for task in pending:
            # Each task references exactly one crawl source by source_id.
            source = session.query(CrawlSource).filter_by(id=task.source_id).one_or_none()
            if source is None:
                print(f"Task ID: {task.id} has no associated source.")
                continue

            rules = json.loads(source.crawl_rule) if source.crawl_rule else {}
            collected.append(SingleTask(
                update_book_list_url=rules.get('updateBookListUrl', ''),
                book_list_url=rules.get('bookListUrl', ''),
                cat_id_rule=rules.get('catIdRule', {}),
                book_status_rule=rules.get('bookStatusRule', {}),
                book_id_pattern=rules.get('bookIdPattern', ''),
                page_pattern=rules.get('pagePattern', ''),
                total_page_pattern=rules.get('totalPagePattern', ''),
                book_detail_url=rules.get('bookDetailUrl', ''),
                book_name_pattern=rules.get('bookNamePattern', ''),
                author_name_pattern=rules.get('authorNamePattern', ''),
                pic_url_pattern=rules.get('picUrlPattern', ''),
                status_pattern=rules.get('statusPattern', ''),
                score_pattern=rules.get('scorePattern', ''),
                visit_count_pattern=rules.get('visitCountPattern', ''),
                desc_start=rules.get('descStart', ''),
                desc_end=rules.get('descEnd', ''),
                update_time_pattern=rules.get('updateTimePattern', ''),
                update_time_format_pattern=rules.get('updateTimeFormatPattern', ''),
                book_index_url=rules.get('bookIndexUrl', ''),
                index_id_pattern=rules.get('indexIdPattern', ''),
                index_name_pattern=rules.get('indexNamePattern', ''),
                book_content_url=rules.get('bookContentUrl', ''),
                content_start=rules.get('contentStart', ''),
                content_end=rules.get('contentEnd', ''),
                pic_url_prefix=rules.get('picUrlPrefix', ''),
                book_index_start=rules.get('bookIndexStart', ''),
                filter_content=rules.get('filterContent', ''),
                # Task-level identification of the concrete book
                source_book_id=task.source_book_id,
                cat_id=task.cat_id,
                book_name=task.book_name,
                author_name=task.author_name,
                book_id=task.book_id,
            ))

        return collected

    except Exception as e:
        print(f"Error querying tasks: {e}")
        return []
    finally:
        session.close()


if __name__ == "__main__":
    # 1. Load all pending single-book crawl tasks (task_status == 2).
    single_tasks = get_tasks_with_rules()

    # 2. For each task: scrape the detail page if needed, then the chapters.
    for task in single_tasks:
        # BUGFIX: `book` used to be referenced even when task.book_id was
        # falsy, which raised NameError; skip such tasks explicitly.
        if not task.book_id:
            print(f"Skipping task for book '{task.book_name}': no book_id set.")
            continue

        # Scrape the detail page only if the book is not already in the DB.
        book = get_book(task.book_id)
        if book is None:
            book = scrape_novel_detail(task)
        if book is None:
            # Detail scraping failed; chapters cannot be processed.
            print(f"Failed to scrape detail page for book_id={task.book_id}.")
            continue

        # Scrape the chapter pages only when no chapter has been stored yet
        # (last_index_id is None); otherwise only the latest chapters would
        # need updating, which is not implemented here.
        if book.last_index_id is None:
            # TODO: the start URL and selectors are hard-coded for one site;
            # they should come from the task's crawl rules.
            start_url = 'https://zbwq.wmg.weimeigu.net/app/index.php?i=16414&c=entry&tid=25699&do=mulu&m=iweite_xiaoshuo'
            chapter_selector = 'ul#html_box li'   # selector for chapter links
            next_page_selector = '#next_page a'   # selector for the "next page" button

            # Initialize and run the chapter scraper for this task.
            scraper = NovelScraper(start_url, chapter_selector, next_page_selector, task)
            scraper.start_scraping()

    # BUGFIX: removed the trailing `save_novel_to_db(novel_data)` call and the
    # dangling module-level `else:` branch — both referenced names that were
    # never defined (`novel_data`, `book_detail_url`, `response`) and would
    # raise NameError at runtime.
