import schedule
import time
import logging
import os
import re
from bs4 import BeautifulSoup
from request_utils import ask_url
from save_to_csv_utils import save_csv


class NewMovieSpider:
    """Spider for the Douban new-movie chart (豆瓣新片榜).

    Downloads the chart page, extracts one record per movie row and
    writes the records to a CSV file.
    """

    def __init__(self, base_url, csv_path):
        """Initialize spider configuration.

        Args:
            base_url: URL of the chart page to scrape.
            csv_path: Destination path of the CSV output file.
        """
        self.base_url = base_url
        self.csv_path = csv_path

        # Configure logging to ../spider.log.
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        # Only attach a handler when the logger has none yet; the original
        # code added a fresh FileHandler on every instantiation, so creating
        # several spiders duplicated every log record.
        if not self.logger.handlers:
            formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            file_handler = logging.FileHandler('../spider.log', encoding='utf-8')
            file_handler.setFormatter(formatter)
            self.logger.addHandler(file_handler)

        # Pre-compiled regular expressions that pull fields out of a single
        # <tr class="item"> HTML fragment.
        self.FIND_LIST = re.compile(r'<a href="(.*?)">')                        # detail-page link
        self.FIND_IMG = re.compile(r'<img[^>]*src="([^"]+)"[^>]*>', re.S)       # poster image URL
        self.FIND_TITLE = re.compile(r'<img[^>]*alt="([^"]+)"[^>]*>')           # Chinese title (img alt)
        self.FIND_TITLE2 = re.compile(r'<span style="font-size:13px;">(.*?)</span>')  # other title
        self.FIND_TBG = re.compile(r'<p>(.*?)</p>', re.S)                       # background blurb
        self.FIND_RATING = re.compile(r'<span class="rating_nums">(.*?)</span>')  # score
        self.FIND_JUDGE = re.compile(r'\((\d+)人评价\)')                         # rating count

        # Parsed rows, one list per movie; filled by _parse_data().
        self.movie_data = []

    def fetch_data(self):
        """Download the chart page and parse it into self.movie_data.

        Returns:
            bool: True when the page was fetched and parsed, False when the
            download failed.
        """
        self.logger.info(f"开始爬取数据: {self.base_url}")
        print(f"开始爬取数据: {self.base_url}")

        html = ask_url(self.base_url)
        if not html:
            self.logger.error("获取网页内容失败")
            return False

        self._parse_data(html)
        return True

    def _parse_data(self, html_content):
        """Parse every movie row out of the chart HTML.

        Resets self.movie_data and appends one record per
        <tr class="item"> element. A malformed row is logged and skipped
        rather than aborting the whole page.
        """
        self.movie_data = []
        soup = BeautifulSoup(html_content, 'html.parser')

        for item in soup.find_all('tr', class_="item"):
            try:
                self.movie_data.append(self._parse_movie_item(str(item)))
            except Exception as e:
                self.logger.error(f"解析电影项失败: {e}")

        self.logger.info(f"共解析 {len(self.movie_data)} 条电影数据")

    def _parse_movie_item(self, item_str):
        """Extract one movie's fields from its <tr> HTML fragment.

        Args:
            item_str: HTML text of a single <tr class="item"> element.

        Returns:
            list: [detail link, poster URL, Chinese title, other title,
            background info, rating, rating count]. Rating and count fall
            back to a single space when absent (e.g. unreleased movies).

        Raises:
            IndexError: when a mandatory field is missing from the fragment
            (caught and logged by _parse_data).
        """
        data = []

        # Mandatory fields — an IndexError here marks the row as malformed.
        data.append(self.FIND_LIST.findall(item_str)[0])    # detail-page link
        data.append(self.FIND_IMG.findall(item_str)[0])     # poster image URL
        data.append(self.FIND_TITLE.findall(item_str)[0])   # Chinese title
        data.append(self.FIND_TITLE2.findall(item_str)[0])  # other/original title
        data.append(self.FIND_TBG.findall(item_str)[0])     # background info

        # Optional fields — keep a placeholder so every row has 7 columns.
        rating = self.FIND_RATING.findall(item_str)
        data.append(rating[0] if rating else " ")

        judge_num = self.FIND_JUDGE.findall(item_str)
        data.append(judge_num[0] if judge_num else " ")

        return data

    def save_data(self):
        """Write the parsed rows to the CSV file.

        Returns:
            bool: True on success; False when there is nothing to save or
            writing failed.
        """
        if not self.movie_data:
            self.logger.warning("没有数据可保存")
            return False

        headers = ('电影详情链接', '电影图片链接', '影片中文名', '影片外国名', '相关内容', '评分', '评价数量')

        try:
            save_csv(self.movie_data, self.csv_path, headers)
            self.logger.info(f"数据已成功保存到 {self.csv_path}")
            print(f"数据已成功保存到 {self.csv_path}")
            return True
        except Exception as e:
            self.logger.error(f"保存数据失败: {e}")
            return False

    def run(self):
        """Fetch, parse and save in one pass.

        Returns:
            bool: True when both fetching and saving succeeded.
        """
        self.logger.info("开始执行新电影爬虫程序...")
        print("开始执行新电影爬虫程序...")

        if self.fetch_data() and self.save_data():
            self.logger.info("新电影爬虫程序已执行完毕")
            print("新电影爬虫程序已执行完毕")
            return True
        else:
            self.logger.error("新电影爬虫程序执行失败")
            print("新电影爬虫程序执行失败")
            return False


def schedule_spider():
    """Launch the spider immediately, then re-run it every day at 02:10."""
    target_url = "https://movie.douban.com/chart"
    output_csv = "../豆瓣新片榜.csv"
    spider = NewMovieSpider(base_url=target_url, csv_path=output_csv)

    # Register the recurring job: fire at 02:10 every morning.
    schedule.every().day.at("02:10").do(spider.run)

    # Do one run right away instead of waiting for the first trigger.
    spider.run()

    # Scheduler loop: poll for pending jobs once per second, forever.
    while True:
        schedule.run_pending()
        time.sleep(1)


# Script entry point: start the scheduling loop (blocks forever).
if __name__ == '__main__':
    schedule_spider()