import requests
from bs4 import BeautifulSoup
import csv
import time

# Basic configuration
BASE_URL = "https://movie.douban.com/top250"
# Browser-like User-Agent so Douban serves the real page instead of blocking the script.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}


def get_page(url, timeout=10):
    """Fetch a page and return its decoded text, or None on failure.

    Args:
        url: Absolute URL to request.
        timeout: Seconds before the request is aborted (default 10).
            The original call had no timeout, so a stalled connection
            would hang the crawler indefinitely.

    Returns:
        The response body as a str, or None if the request failed for
        any reason (connection error, timeout, HTTP 4xx/5xx).
    """
    try:
        # Send browser-like headers so Douban does not reject the client.
        response = requests.get(url, headers=HEADERS, timeout=timeout)
        response.raise_for_status()  # raise on 4xx/5xx status codes
        response.encoding = 'utf-8'
        return response.text
    except requests.exceptions.RequestException as e:
        print(f"请求失败: {e}")
        return None


def parse_html(html):
    """Parse one Top250 list page into a list of movie records.

    Args:
        html: Raw HTML of a Top250 list page.

    Returns:
        A list of dicts with keys 'title', 'rating', 'year', 'link'.
        Entries missing any core field are skipped instead of raising
        (the original code crashed with AttributeError/KeyError when a
        tag or attribute was absent).
    """
    soup = BeautifulSoup(html, 'html.parser')
    movies = []
    for item in soup.find_all('div', class_='item'):
        title_tag = item.find('span', class_='title')
        rating_tag = item.find('span', class_='rating_num')
        link_tag = item.find('a')
        # Skip malformed entries rather than crashing on a None tag.
        if not (title_tag and rating_tag and link_tag):
            continue

        # The text node right after the first <br> in div.bd holds the
        # "year / country / genre" line; every lookup along the way may
        # be missing, so guard each step.
        year = '未知年份'
        bd = item.find('div', class_='bd')
        if bd is not None:
            br = bd.find('br')
            if br is not None and br.next_sibling is not None:
                year = br.next_sibling.strip() or '未知年份'

        movies.append({
            'title': title_tag.text.strip(),
            'rating': rating_tag.text.strip(),
            'year': year,
            'link': link_tag.get('href', ''),  # .get avoids KeyError on <a> without href
        })
    return movies


def save_to_csv(data, filename='douban_top250.csv'):
    """Write the collected movie records to *filename* as CSV.

    Args:
        data: Iterable of dicts with keys 'title', 'rating', 'year', 'link'.
        filename: Output path; defaults to 'douban_top250.csv'.

    The utf-8-sig encoding prepends a BOM so Excel opens the Chinese
    text correctly; newline='' lets the csv module manage line endings.
    """
    columns = ['title', 'rating', 'year', 'link']
    with open(filename, 'w', newline='', encoding='utf-8-sig') as handle:
        writer = csv.DictWriter(handle, fieldnames=columns)
        writer.writeheader()
        for record in data:
            writer.writerow(record)


def main():
    """Crawl all ten Top250 list pages and persist the results as CSV."""
    collected = []

    # Ten pages of 25 movies each; Douban paginates via the `start` offset.
    for index in range(10):
        page_url = f"{BASE_URL}?start={index * 25}"
        print(f"正在抓取: {page_url}")

        content = get_page(page_url)
        if not content:
            continue
        collected.extend(parse_html(content))
        time.sleep(2)  # polite crawl delay between successful requests

    if not collected:
        print("未获取到有效数据")
        return
    save_to_csv(collected)
    print(f"成功保存{len(collected)}条数据到douban_top250.csv")


if __name__ == '__main__':
    main()