# -*- coding: UTF-8 -*-
"""
@Project , trainee
@File    , scheduler.py
@IDE     , PyCharm
@Author  , 2607750505@qq.com
@Date    , 2025/6/20 9:11
"""
# -*- coding: UTF-8 -*-
"""调度爬虫任务的执行"""
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.cron import CronTrigger
import logging
import time
import random
from spider.Movie_detailed_data import main as movie_main
from spider.Movie_review_data import spider_main as review_main
from spider.clean_duplicate_movies import clean_duplicate_data
from spider.clean_duplicate_comments import clean_duplicate_comments

# Logging configuration: append scheduler activity to a log file instead of stdout.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    filename='scheduler.log'
)
logger = logging.getLogger(__name__)

# Tuning parameters for each scheduled crawl cycle.
MOVIE_BATCH_SIZE = 10  # number of new movies to crawl per run
REVIEW_PER_MOVIE = 20  # target number of reviews per movie
MAX_RETRY = 3  # max attempts when crawling one movie's reviews


def get_movies_without_reviews(limit, *, host='localhost', user='root',
                               password='123456', database='movie_system'):
    """Return movies whose stored review count is below the target.

    Selects up to *limit* random movies that declare at least one comment
    (``movies.commentLen > 0``) but currently have fewer than
    ``REVIEW_PER_MOVIE`` rows in the ``comments`` table.

    Args:
        limit: Maximum number of movies to return.
        host, user, password, database: MySQL connection settings
            (keyword-only). Defaults match the project's local
            development database, so existing callers are unaffected.

    Returns:
        A tuple of ``(movie_id, commentLen)`` rows from the cursor.
    """
    # Local import keeps the module importable even where pymysql is absent.
    import pymysql
    conn = pymysql.connect(
        host=host,
        user=user,
        password=password,
        database=database
    )

    try:
        with conn.cursor() as cursor:
            # Movies that declare comments but have fewer crawled reviews
            # than the target (or none at all).
            sql = """
                SELECT m.id, m.commentLen 
                FROM movies m
                LEFT JOIN (
                    SELECT movie_id, COUNT(*) as review_count 
                    FROM comments 
                    GROUP BY movie_id
                ) c ON m.id = c.movie_id
                WHERE m.commentLen > 0 
                AND (c.review_count IS NULL OR c.review_count < %s)
                ORDER BY RAND()  -- 随机选择
                LIMIT %s
            """
            cursor.execute(sql, (REVIEW_PER_MOVIE, limit))
            return cursor.fetchall()
    finally:
        # Always release the connection, even if the query raised.
        conn.close()


def _crawl_reviews_with_retry(movie_id, limit):
    """Crawl up to *limit* reviews for one movie, retrying on failure.

    Makes up to ``MAX_RETRY`` attempts with a random 5-10 s backoff after
    each failed or raising attempt.

    Returns:
        True as soon as ``review_main`` reports success, False when every
        attempt failed.
    """
    for attempt in range(MAX_RETRY):
        try:
            if review_main(movie_id, limit):
                logger.info(f"成功为电影ID {movie_id} 爬取评论")
                return True
            logger.warning(f"第 {attempt + 1} 次尝试为电影ID {movie_id} 爬取评论失败")
        except Exception as e:
            logger.error(f"爬取评论时发生异常: {e}")
        # Random backoff before the next attempt to avoid hammering the site.
        time.sleep(random.uniform(5, 10))
    logger.error(f"为电影ID {movie_id} 爬取评论失败，已达到最大重试次数")
    return False


def run_spiders():
    """Run one full crawl cycle: new movies, missing reviews, then dedup.

    Steps:
      1. Crawl ``MOVIE_BATCH_SIZE`` new movies.
      2. Find movies whose crawled review count is below ``REVIEW_PER_MOVIE``.
      3. Crawl reviews for each of them, with retries and random delays.
      4. Remove duplicate movie and comment rows.

    All failures are caught and logged so the scheduler process keeps
    running even when a cycle fails.
    """
    logger.info("开始执行爬虫任务...")

    try:
        # 步骤1: crawl a fresh batch of movies.
        logger.info(f"开始爬取 {MOVIE_BATCH_SIZE} 部新电影...")
        movie_main(MOVIE_BATCH_SIZE)

        # 步骤2: find the movies that still need reviews.
        logger.info("查询需要爬取评论的电影...")
        movies_to_crawl = get_movies_without_reviews(MOVIE_BATCH_SIZE)

        if not movies_to_crawl:
            logger.info("没有找到需要爬取评论的电影")
        else:
            logger.info(f"找到 {len(movies_to_crawl)} 部需要爬取评论的电影")

            # 步骤3: crawl reviews for each selected movie.
            for movie_id, comment_len in movies_to_crawl:
                # Never request more reviews than the movie actually has.
                actual_limit = min(REVIEW_PER_MOVIE, int(comment_len))
                logger.info(f"开始为电影ID {movie_id} 爬取 {actual_limit} 条评论...")

                _crawl_reviews_with_retry(movie_id, actual_limit)

                # Random pause between movies to avoid rate limiting.
                time.sleep(random.uniform(2, 5))

        # 步骤4: drop duplicate rows introduced by re-crawls.
        logger.info("开始清理重复数据...")
        clean_duplicate_data()
        clean_duplicate_comments()

        logger.info("爬虫任务执行完成")

    except Exception as e:
        logger.error(f"爬虫任务执行失败: {str(e)}", exc_info=True)
    finally:
        logger.info("爬虫任务结束")


def _main():
    """Entry point: run the crawl once now, then schedule it daily at 02:00."""
    # Kick off an immediate run so fresh data is available right away.
    logger.info("首次执行爬虫任务...")
    run_spiders()

    # BlockingScheduler keeps the process alive between runs.
    scheduler = BlockingScheduler()
    scheduler.add_job(
        run_spiders,
        CronTrigger(hour=2),  # every day at 02:00
        id='crawl_movies_and_reviews',
        name='定时爬取电影和评论数据',
        max_instances=1,  # never let two crawl cycles overlap
        misfire_grace_time=3600  # a missed run may still fire within 1 hour
    )

    logger.info("调度器已启动，等待执行定时任务...")

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        logger.info("调度器已停止")


if __name__ == '__main__':
    _main()