import requests
import re
import time
import random
import pandas as pd
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options

# Base URL of the Douban Movie Top 250 listing
BASE_URL = 'https://movie.douban.com/top250'

# Selenium Chrome configuration: headless browsing plus flags that keep
# Chrome stable in sandboxed/containerized environments.
chrome_options = Options()
for _flag in (
    '--headless',               # run without a visible window
    '--disable-gpu',
    '--no-sandbox',
    'start-maximized',
    'enable-automation',
    '--disable-infobars',
    '--disable-dev-shm-usage',  # avoid /dev/shm exhaustion in containers
):
    chrome_options.add_argument(_flag)

def fetch_page_with_selenium(url):
    """
    Fetch a fully rendered page using a headless Selenium Chrome browser.

    Args:
        url: The page URL to load.

    Returns:
        The page's HTML source as a string, or None if fetching failed.
    """
    driver = None
    try:
        driver = webdriver.Chrome(options=chrome_options)
        driver.get(url)
        # Random pause lets dynamic content finish rendering and makes
        # the request pattern look less bot-like.
        time.sleep(random.uniform(2, 4))
        return driver.page_source
    except Exception as e:
        print(f"Error fetching page {url} with Selenium: {e}")
        return None
    finally:
        # Always release the browser process — the original only quit on
        # the success path, leaking Chrome whenever get()/page_source raised.
        if driver is not None:
            driver.quit()

def parse_movie_info_with_regex(html_content):
    """
    Parse movie entries out of a Douban Top 250 listing page with regexes.

    Args:
        html_content: Raw HTML of one listing page.

    Returns:
        A list of dicts with keys 'title', 'year', 'country', 'url'
        (missing fields default to '').
    """
    movies = []

    # Each movie entry sits in a <div class="item"> ... </li> span.
    movie_items = re.findall(r'<div class="item">(.*?)</li>', html_content, re.S)

    for item in movie_items:
        # Movie title (first <span class="title"> is the primary title).
        title_match = re.search(r'<span class="title">(.*?)</span>', item)
        title = title_match.group(1) if title_match else ''

        # Detail-page link (first anchor inside the entry).
        link_match = re.search(r'<a href="(.*?)"', item)
        link = link_match.group(1) if link_match else ''

        # Year and country live in the <p class=""> info paragraph, e.g.
        # "Director ...<br>1994&nbsp;/&nbsp;USA&nbsp;/&nbsp;Drama".
        info_match = re.search(r'<p class="">(.*?)</p>', item, re.S)
        year, country = '', ''
        if info_match:
            # Douban wraps the "/" separators in &nbsp; entities, which are
            # not \s — normalize them first so the split and strip() work
            # (the original left literal "&nbsp;" residue in `country`).
            info_text = info_match.group(1).replace('&nbsp;', ' ').strip()
            details = re.split(r'\s*/\s*', info_text)
            if details:
                # Year is the 4-digit run in the first segment; extract it
                # even when the country segment is missing.
                year_match = re.search(r'\d{4}', details[0])
                year = year_match.group(0) if year_match else ''
            if len(details) >= 2:
                country = details[1].strip()

        movies.append({
            'title': title,
            'year': year,
            'country': country,
            'url': link
        })

    return movies

def crawl_douban_top250():
    """
    Crawl all ten pages of the Douban Movie Top 250 listing.

    Returns:
        A list of movie dicts accumulated across every page.
    """
    collected = []

    # Results are paginated 25 per page; `start` selects the page offset.
    for offset in range(0, 250, 25):
        page_number = offset // 25 + 1
        print(f"Fetching page {page_number} using Selenium...")

        page_html = fetch_page_with_selenium(f'{BASE_URL}?start={offset}&filter=')
        if page_html:
            collected.extend(parse_movie_info_with_regex(page_html))

        # Randomized pause between pages to avoid triggering anti-bot bans.
        time.sleep(random.uniform(1, 3))

    return collected

if __name__ == '__main__':
    print("开始爬取豆瓣电影TOP250...")
    scraped = crawl_douban_top250()

    # Persist results; utf-8-sig prepends a BOM so spreadsheet tools
    # render the CJK text correctly.
    output_frame = pd.DataFrame(scraped)
    output_frame.to_csv('douban_top250_movies.csv', index=False, encoding='utf-8-sig')

    print(f"爬取完成，共爬取 {len(scraped)} 部电影，已保存到 douban_top250_movies.csv")
