import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
from typing import List, Dict, Any

class DoubanMovieCrawler:
    """Crawler for the Douban Movie Top 250 list.

    Fetches each paginated listing page, extracts the title, rating and
    tagline of every movie, and can export the collected records to CSV.
    """

    def __init__(self):
        self.base_url = "https://movie.douban.com/top250"
        # A browser-like User-Agent; Douban rejects the default requests UA.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # Accumulated movie records: each is {'title', 'rating', 'quote'}.
        self.movies: List[Dict[str, Any]] = []

    def fetch_page(self, page_num: int) -> str:
        """Fetch the HTML of one listing page.

        Args:
            page_num: Zero-based page index; the site paginates 25 items
                per page via the ``start`` query parameter.

        Returns:
            The page HTML, or an empty string if the request failed.
        """
        params = {'start': page_num * 25}  # 25 records per page

        try:
            # timeout prevents a stalled connection from hanging the
            # crawler indefinitely (connect + read).
            response = requests.get(
                self.base_url, params=params, headers=self.headers, timeout=10
            )
            response.raise_for_status()  # raise on 4xx/5xx responses
        except requests.RequestException as e:
            print(f"请求出错: {e}")
            return ""
        return response.text

    def parse_page(self, html: str) -> None:
        """Parse one page of HTML and append movie records to ``self.movies``.

        Items missing a tagline get the placeholder "暂无简介"; items that
        fail to parse are reported and skipped.
        """
        soup = BeautifulSoup(html, 'html.parser')
        items = soup.select('div.item')

        for item in items:
            try:
                # .select_one returns None when the tag is absent, so
                # .text raises AttributeError; float() raises ValueError
                # on a malformed rating. Catch only those two — anything
                # else is a real bug and should surface.
                title = item.select_one('span.title').text.strip()
                rating = item.select_one('span.rating_num').text.strip()
                quote = item.select_one('p.quote')
                quote = quote.text.strip() if quote else "暂无简介"

                self.movies.append({
                    'title': title,
                    'rating': float(rating),
                    'quote': quote
                })
            except (AttributeError, ValueError) as e:
                print(f"解析出错: {e}")

    def crawl(self, pages: int = 10) -> None:
        """Crawl ``pages`` listing pages, pausing randomly between requests.

        Args:
            pages: Number of pages to fetch (10 covers the full Top 250).
        """
        for page in range(pages):
            print(f"正在爬取第 {page+1} 页...")
            html = self.fetch_page(page)
            if html:
                self.parse_page(html)

            # Random delay between requests to avoid an IP ban; skip the
            # pointless wait after the final page.
            if page < pages - 1:
                wait_time = random.uniform(1, 3)
                print(f"等待 {wait_time:.2f} 秒后继续...")
                time.sleep(wait_time)

    def save_to_csv(self, filename: str = 'douban_top250.csv') -> None:
        """Write the collected movie records to a CSV file.

        Uses utf-8-sig so Excel on Windows detects the encoding correctly.
        """
        df = pd.DataFrame(self.movies)
        df.to_csv(filename, index=False, encoding='utf-8-sig')
        print(f"数据已保存至 {filename}")

if __name__ == "__main__":
    # Crawl the first 10 pages (all 250 movies) and export them to CSV.
    top250 = DoubanMovieCrawler()
    top250.crawl(pages=10)
    top250.save_to_csv()