import requests
from bs4 import BeautifulSoup
import time
import re
import random
import logging
from urllib.parse import unquote
import pymysql
import threading
import argparse
from datetime import datetime

# Configure the module-wide logger (timestamp - level - message).
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Pool of desktop and mobile User-Agent strings; one is picked at random
# per request (see DoubanSpider.get_random_ua) to reduce bot detection.
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.5 Safari/605.1.15',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/120.0.0.0 Safari/537.36',
    'Mozilla/5.0 (iPad; CPU OS 16_5 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.5 Mobile/15E148 Safari/604.1'
]

class DoubanSpider:
    """Scrape the Douban Movie Top-250 chart into a local MySQL database.

    Pipeline: each list page is fetched and parsed, all rows from a run are
    bulk-inserted into a staging table (``movies_temp``), and — only when the
    run produced a COMPLETE set of pages — the staging table is renamed over
    the live ``movies`` table, so readers never observe a partial dataset.
    Per-page progress is persisted in ``crawl_progress`` so an interrupted
    run does not refetch pages it already completed.

    NOTE(review): constructing this object connects to MySQL and, when the
    database is empty, immediately performs a full (slow, network-bound)
    crawl — confirm callers expect that side effect in ``__init__``.
    """

    def __init__(self):
        # Flag polled by the background thread started in start_periodic_crawl().
        self.running = False
        self.db_config = {
            'host': 'localhost',
            'user': 'root',
            'password': '123456',
            'charset': 'utf8mb4'
        }
        self.session = requests.Session()
        # Douban paginates with a 0-based ``start`` offset (0, 25, ..., 225).
        self.base_url = "https://movie.douban.com/top250?start={}"
        self.temp_table = "movies_temp"  # staging table swapped over ``movies``
        self.init_mysql()

    def is_database_empty(self):
        """Return True when the ``movies`` table has no rows (or cannot be read)."""
        try:
            self.cursor.execute("SELECT COUNT(*) FROM movies")
            count = self.cursor.fetchone()[0]
            return count == 0
        except Exception as e:
            logger.error(f"检查数据库状态失败: {str(e)}")
            # Treat an unreadable table as empty so an initial crawl is attempted.
            return True

    def init_mysql(self):
        """Connect to MySQL, create the schema if needed, and bootstrap data.

        Creates database ``douban_movies`` plus the ``movies``, staging,
        progress and stats tables. When the movies table is empty, kicks off
        a first full crawl. Raises on any failure so the caller knows the
        spider is unusable.
        """
        try:
            self.conn = pymysql.connect(**self.db_config)
            self.cursor = self.conn.cursor()

            # Create and select the database.
            self.cursor.execute("CREATE DATABASE IF NOT EXISTS douban_movies DEFAULT CHARACTER SET utf8mb4")
            self.cursor.execute("USE douban_movies")

            # Live movie table.
            create_table_sql = """
            CREATE TABLE IF NOT EXISTS movies (
                id INT AUTO_INCREMENT PRIMARY KEY,
                rank_num INT NOT NULL,
                title VARCHAR(255) NOT NULL,
                original_title VARCHAR(255),
                rating FLOAT NOT NULL,
                director VARCHAR(255),
                actors TEXT,
                year VARCHAR(10),
                country VARCHAR(100),
                genre VARCHAR(100),
                intro TEXT,
                poster_url TEXT,
                create_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
            """
            self.cursor.execute(create_table_sql)

            # Staging table with the same structure as the live table.
            create_temp_table_sql = f"""
            CREATE TABLE IF NOT EXISTS {self.temp_table} LIKE movies
            """
            self.cursor.execute(create_temp_table_sql)

            # Per-page crawl progress (one row per completed page offset).
            create_progress_table_sql = """
            CREATE TABLE IF NOT EXISTS crawl_progress (
                page_index INT PRIMARY KEY,
                crawl_time TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
            """
            self.cursor.execute(create_progress_table_sql)

            # Single-row stats table tracking the total movie count.
            create_stats_table_sql = """
            CREATE TABLE IF NOT EXISTS crawl_stats (
                id INT PRIMARY KEY AUTO_INCREMENT,
                total_movies INT DEFAULT 0,
                last_update TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
            )
            """
            self.cursor.execute(create_stats_table_sql)

            # Seed the stats row; IGNORE keeps an existing row untouched.
            self.cursor.execute("INSERT IGNORE INTO crawl_stats (id, total_movies) VALUES (1, 0)")
            self.conn.commit()

            # Bootstrap: run a first full crawl only when there is no data yet.
            if self.is_database_empty():
                logger.info("数据库为空，执行首次爬取...")
                self.crawl(reset=True)
            else:
                logger.info("数据库已存在数据，跳过首次爬取")

        except Exception as e:
            logger.error(f"初始化数据库失败: {str(e)}")
            raise

    def get_random_ua(self):
        """Return a random User-Agent string from the module-level pool."""
        return random.choice(USER_AGENTS)

    def get_headers(self):
        """Build randomized request headers (UA, language, referer) per request."""
        return {
            'User-Agent': self.get_random_ua(),
            'Accept-Language': random.choice(['zh-CN,zh;q=0.9', 'en-US,en;q=0.8,zh-CN;q=0.7']),
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Referer': random.choice([
                'https://movie.douban.com/',
                'https://movie.douban.com/top250',
                'https://www.douban.com/',
                'https://www.google.com/'
            ])
        }

    def clean_text(self, text):
        """Normalize scraped text: URL-decode, collapse whitespace and slash
        runs, drop ellipsis runs (3+ dots), and strip surrounding spaces.
        Returns "" for falsy input."""
        if not text:
            return ""
        text = unquote(text)
        text = re.sub(r'\s+', ' ', text)
        text = re.sub(r'[/\\]+', '/', text)
        text = re.sub(r'\s*/\s*', '/', text)
        text = re.sub(r'\.{3,}', '', text)
        text = re.sub(r'\s+$', '', text)
        return text.strip()

    def get_page_data(self, url, retry_count=3):
        """Fetch one list page and parse it into a list of movie dicts.

        Each dict has keys: rank, title, original_title, rating, director,
        actors, year, country, genre, intro, poster_url. Retries up to
        ``retry_count`` times with randomized backoff; returns [] when every
        attempt fails.
        """
        for attempt in range(retry_count):
            try:
                # Randomized politeness delay before every attempt.
                delay = random.uniform(15, 30)
                logger.info(f"等待 {delay:.2f} 秒后发起请求...")
                time.sleep(delay)

                current_headers = self.get_headers()
                # Fresh pseudo-random cookies per request to look like a new visitor.
                cookies = {
                    'bid': ''.join(random.choice('0123456789abcdef') for _ in range(11)),
                    'll': '"108288"',
                    'ct': str(int(time.time() * 1000))
                }

                logger.info(f"正在请求: {url}, 尝试次数: {attempt+1}/{retry_count}")
                response = self.session.get(
                    url,
                    headers=current_headers,
                    cookies=cookies,
                    timeout=15,
                    verify=True  # verify the SSL certificate
                )

                # A captcha page or 403 means we are being rate-limited; back off hard.
                if "验证码" in response.text or response.status_code == 403:
                    logger.warning("可能被限制访问，等待更长时间...")
                    time.sleep(random.uniform(60, 120))
                    continue

                response.raise_for_status()
                logger.info(f"成功获取页面，大小: {len(response.text)} 字节")

                soup = BeautifulSoup(response.text, 'html.parser')
                items = soup.find_all('div', class_='item')
                logger.info(f"找到 {len(items)} 个电影项")

                if not items:
                    # Log the page title to help diagnose blocks / layout changes.
                    title_elem = soup.find('title')
                    if title_elem:
                        logger.warning(f"页面标题: {title_elem.get_text()}")
                    logger.warning("页面未包含电影列表，可能被限制或页面结构变化")
                    if attempt < retry_count - 1:
                        time.sleep(random.uniform(30, 60))
                        continue

                movie_list = []

                for item in items:
                    try:
                        # Rank number (the <em> inside the item).
                        rank_elem = item.find('em')
                        if not rank_elem:
                            continue
                        rank = rank_elem.get_text()

                        # Chinese title plus optional original-language title.
                        titles = item.find_all('span', class_='title')
                        main_title = titles[0].get_text() if titles else ""
                        other_title = ""
                        if len(titles) > 1:
                            other_title = titles[1].get_text().replace('/', '').strip()

                        # Numeric rating.
                        rating_elem = item.find('span', class_='rating_num')
                        if not rating_elem:
                            continue
                        rating = rating_elem.get_text()

                        # Director / cast live in the first text node of div.bd > p.
                        info_div = item.find('div', class_='bd')
                        if not info_div:
                            continue
                        info_p = info_div.find('p')

                        director = ""
                        actors = ""
                        if info_p and info_p.contents:
                            director_actor_text = info_p.contents[0].strip()
                            director_match = re.search(r'导演:\s*(.*?)(?:\s*主演:|$)', director_actor_text)
                            if director_match:
                                director = director_match.group(1).strip()
                            actors_match = re.search(r'主演:\s*(.*?)$', director_actor_text)
                            if actors_match:
                                actors = actors_match.group(1).strip()

                        # "year / country / genre" line is the third node of the <p>.
                        year = ""
                        country = ""
                        genre = ""
                        if info_p and len(info_p.contents) > 2:
                            year_country_genre = info_p.contents[2].strip()
                            year_match = re.search(r'\b(\d{4})\b', year_country_genre)
                            if year_match:
                                year = year_match.group(1)
                            country_match = re.search(r'\d{4}\s*/\s*([^/]+)', year_country_genre)
                            if country_match:
                                country = country_match.group(1).strip()
                            genre_match = re.search(r'\d{4}\s*/\s*[^/]+\s*/\s*(.*)', year_country_genre)
                            if genre_match:
                                genre = genre_match.group(1).strip()

                        # One-line quote used as the intro, when present.
                        intro = "暂无简介"
                        quote_p = item.find('p', class_='quote')
                        if quote_p:
                            span = quote_p.find('span')
                            if span:
                                intro = span.get_text(strip=True)

                        # Poster image URL.
                        poster_url = ""
                        img_elem = item.find('img')
                        if img_elem and 'src' in img_elem.attrs:
                            poster_url = img_elem['src']

                        movie_list.append({
                            'rank': int(rank),
                            'title': self.clean_text(main_title),
                            'original_title': self.clean_text(other_title),
                            'rating': float(rating),
                            'director': self.clean_text(director),
                            'actors': self.clean_text(actors),
                            'year': self.clean_text(year),
                            'country': self.clean_text(country),
                            'genre': self.clean_text(genre),
                            'intro': self.clean_text(intro),
                            'poster_url': poster_url
                        })

                    except Exception as e:
                        # One malformed item must not abort the whole page.
                        logger.error(f"解析电影项时出错: {str(e)}")
                        continue

                logger.info(f"成功获取 {url} 的数据，共 {len(movie_list)} 条")
                return movie_list

            except Exception as e:
                logger.error(f"请求失败: {url}，错误：{str(e)}")
                if attempt < retry_count - 1:
                    # Linear backoff scaled by the attempt number.
                    wait_time = random.uniform(20, 40) * (attempt + 1)
                    logger.info(f"等待 {wait_time:.2f} 秒后重试...")
                    time.sleep(wait_time)
                else:
                    logger.error(f"已达到最大重试次数，放弃请求: {url}")

        return []

    def save_to_temp_table(self, movies):
        """Bulk-insert scraped rows into the staging table.

        The staging table is consumed (renamed away) by swap_tables(), so it
        is recreated here if missing before being truncated and refilled.
        Returns the number of rows inserted, or 0 on failure / empty input.
        """
        if not movies:
            return 0

        # BUGFIX: after a successful swap_tables() the staging table no longer
        # exists (it was renamed to ``movies``); recreate it or the TRUNCATE
        # below fails on every crawl after the first one.
        self.cursor.execute(f"CREATE TABLE IF NOT EXISTS {self.temp_table} LIKE movies")
        # Start from a clean slate.
        self.cursor.execute(f"TRUNCATE TABLE {self.temp_table}")

        insert_sql = f"""
        INSERT INTO {self.temp_table} (rank_num, title, original_title, rating, director, actors, year, country, genre, intro, poster_url)
        VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
        """

        values = [(
            movie['rank'],
            movie['title'],
            movie['original_title'],
            movie['rating'],
            movie['director'],
            movie['actors'],
            movie['year'],
            movie['country'],
            movie['genre'],
            movie['intro'],
            movie['poster_url']
        ) for movie in movies]

        try:
            count = self.cursor.executemany(insert_sql, values)
            self.conn.commit()
            logger.info(f"成功插入 {count} 条电影数据到临时表")
            return count
        except Exception as e:
            self.conn.rollback()
            logger.error(f"插入数据到临时表失败: {str(e)}")
            return 0

    def swap_tables(self):
        """Atomically replace ``movies`` with the freshly filled staging table.

        NOTE: RENAME TABLE and DROP TABLE are DDL and auto-commit in MySQL,
        so they cannot be wrapped in a rollback-able transaction. The RENAME
        itself is atomic, which is what guarantees readers never see a
        half-updated table.
        """
        try:
            # A crash between the RENAME and the DROP in a previous run can
            # leave ``movies_old`` behind, which would make the RENAME fail
            # forever; clear it first so the swap is retryable.
            self.cursor.execute("DROP TABLE IF EXISTS movies_old")

            # Single atomic rename: old table aside, staging table live.
            self.cursor.execute(f"RENAME TABLE movies TO movies_old, {self.temp_table} TO movies")

            self.cursor.execute("DROP TABLE movies_old")

            self.conn.commit()
            logger.info("成功更新电影数据表")
            return True
        except Exception as e:
            self.conn.rollback()
            logger.error(f"更新电影数据表失败: {str(e)}")
            return False

    def update_stats(self, total_movies):
        """Persist the total movie count into the single-row stats table."""
        try:
            self.cursor.execute("UPDATE crawl_stats SET total_movies = %s WHERE id = 1", (total_movies,))
            self.conn.commit()
        except Exception as e:
            logger.error(f"更新统计信息失败: {str(e)}")
            self.conn.rollback()

    def get_stats(self):
        """Return the stored total movie count (0 when missing or on error)."""
        try:
            self.cursor.execute("SELECT total_movies FROM crawl_stats WHERE id = 1")
            result = self.cursor.fetchone()
            return result[0] if result else 0
        except Exception as e:
            logger.error(f"获取统计信息失败: {str(e)}")
            return 0

    def load_progress(self):
        """Return the set of page offsets already completed in a previous run."""
        try:
            self.cursor.execute("SELECT page_index FROM crawl_progress")
            pages = set([row[0] for row in self.cursor.fetchall()])
            logger.info(f"从数据库加载进度，已完成 {len(pages)} 个页面")
            return pages
        except Exception as e:
            logger.error(f"加载进度失败: {str(e)}")
            return set()

    def save_progress(self, page):
        """Mark one page offset as completed (upsert with a fresh timestamp)."""
        try:
            self.cursor.execute(
                "INSERT INTO crawl_progress (page_index) VALUES (%s) ON DUPLICATE KEY UPDATE crawl_time=CURRENT_TIMESTAMP",
                (page,)
            )
            self.conn.commit()
        except Exception as e:
            logger.error(f"保存进度失败: {str(e)}")
            self.conn.rollback()

    def crawl(self, reset=False, scheduled_time=None):
        """Run one crawl cycle over the Top-250 chart.

        :param reset: discard saved page progress and refetch everything
        :param scheduled_time: optional datetime; sleep until then before starting

        The live ``movies`` table is replaced only when every page of the
        chart was fetched in THIS run, so a partial or resumed run can never
        wipe previously stored data. On a successful swap the progress table
        is cleared so the next periodic cycle performs a full refresh.
        """
        try:
            # Optionally wait until the scheduled start time.
            if scheduled_time:
                current_time = datetime.now()
                if current_time < scheduled_time:
                    wait_seconds = (scheduled_time - current_time).total_seconds()
                    logger.info(f"等待到指定时间 {scheduled_time}，还有 {wait_seconds:.0f} 秒")
                    time.sleep(wait_seconds)

            # All page offsets of the chart: 0, 25, ..., 225.
            all_pages = set(range(0, 250, 25))

            if reset:
                logger.info("重置进度，清空进度表")
                self.cursor.execute("TRUNCATE TABLE crawl_progress")
                self.cursor.execute("UPDATE crawl_stats SET total_movies = 0 WHERE id = 1")
                self.conn.commit()
                crawled_pages = set()
            else:
                crawled_pages = self.load_progress()
                # BUGFIX: if progress already covers every page, a previous run
                # fetched everything but died before the table swap (a
                # successful swap clears progress below). Without this reset
                # the cycle would skip all pages and never refresh the data.
                if crawled_pages >= all_pages:
                    logger.info("进度表已覆盖全部页面但数据未替换，重新开始完整爬取")
                    self.cursor.execute("TRUNCATE TABLE crawl_progress")
                    self.conn.commit()
                    crawled_pages = set()

            page_indices = [p for p in sorted(all_pages) if p not in crawled_pages]
            total_pages = len(page_indices)

            all_movies = []
            fetched_pages = set()  # pages successfully fetched in THIS run
            for page_index, page in enumerate(page_indices, 1):
                url = self.base_url.format(page)
                logger.info(f"正在爬取第 {page_index}/{total_pages} 页: {url}")

                movies = self.get_page_data(url)

                if not movies:
                    logger.warning(f"页面 {url} 无数据，跳过...")
                    continue

                all_movies.extend(movies)
                fetched_pages.add(page)
                self.save_progress(page)

                delay = random.uniform(10, 20)
                logger.info(f"完成页面抓取，休息 {delay:.2f} 秒...")
                time.sleep(delay)

            # BUGFIX: only swap in the staging table when this run produced a
            # COMPLETE dataset. Previously a resumed or partially failed run
            # replaced the whole live table with a fraction of the chart.
            if all_movies and fetched_pages >= all_pages:
                saved_count = self.save_to_temp_table(all_movies)
                if saved_count > 0:
                    if self.swap_tables():
                        self.update_stats(saved_count)
                        # Clear progress so the next periodic cycle does a full
                        # refresh instead of skipping every completed page.
                        self.cursor.execute("TRUNCATE TABLE crawl_progress")
                        self.conn.commit()
                        logger.info(f"抓取完成，成功更新 {saved_count} 条电影数据")
                    else:
                        logger.error("更新正式表失败，保留原有数据")
                else:
                    logger.error("保存到临时表失败，保留原有数据")
            elif all_movies:
                logger.warning("本轮数据不完整，跳过表替换以保留原有数据")
            else:
                logger.warning("未获取到任何电影数据，保留原有数据")

        except Exception as e:
            logger.error(f"爬取过程中发生错误: {str(e)}")
            logger.info("发生错误，保留原有数据")

    def start_periodic_crawl(self, interval=3600, scheduled_time=None):
        """Start a daemon thread that crawls repeatedly.

        :param interval: seconds to sleep between crawl cycles
        :param scheduled_time: optional datetime for the FIRST cycle; once it
            has passed, later cycles start immediately after the interval.
        """
        self.running = True

        def run():
            while self.running:
                self.crawl(scheduled_time=scheduled_time)
                time.sleep(interval)

        t = threading.Thread(target=run, daemon=True)
        t.start()

    def stop(self):
        """Signal the periodic loop to stop and close database resources."""
        self.running = False
        if hasattr(self, 'conn') and self.conn:
            try:
                self.cursor.close()
                self.conn.close()
            except Exception:
                # Closing an already-dead connection is best-effort.
                pass