import requests
from bs4 import BeautifulSoup
import time
import csv
import re

# URL template for the Douban Movie Top250 listing; the `start` query
# parameter paginates the list in steps of 25 (10 pages total).
url_template = "https://movie.douban.com/top250?start={}&filter="

# Request headers that mimic a desktop Chrome browser.  Douban rejects
# requests that lack a realistic User-Agent, so these are required for
# the scrape to succeed.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7',
    'Accept-Encoding': 'gzip, deflate, br',
    'Connection': 'keep-alive',
    'Referer': 'https://movie.douban.com/',
}

def parse_movie_info(info_text):
    """Extract director, actors, year, region and genre from a movie's
    info markup (the raw ``<p>`` HTML produced by ``str(info_p)``).

    Args:
        info_text: Raw HTML string shaped like
            ``<p>导演: X 主演: Y<br/>1994 / 美国 / 犯罪 剧情</p>``.
            May be empty or ``None``, in which case every field is "".

    Returns:
        dict with keys 'director', 'actors', 'year', 'region', 'genre'
        (all strings; "" when a field could not be parsed).
    """
    director = ""
    actors = ""
    year = ""
    region = ""
    genre = ""

    if info_text:
        # The credits line and the "year / region / genre" line are
        # separated by a <br> tag in the source markup, so split on the
        # tag first, while it is still intact.
        parts = re.split(r'<br\s*/?>', info_text)

        # Credits line: drop any remaining tags, collapse all whitespace
        # (including the \xa0 that &nbsp; decodes to), then extract.
        credits = re.sub(r'\s+', ' ', re.sub(r'<[^>]+>', '', parts[0])).strip()

        # Capture everything up to "主演:" (or end of string) so
        # multi-word names like "弗兰克·德拉邦特 Frank Darabont" survive.
        # (The previous pattern `([^ ]+)` stopped at the first space.)
        director_match = re.search(r'导演:\s*(.*?)(?:\s*主演:|$)', credits)
        if director_match:
            director = director_match.group(1).strip()

        actors_match = re.search(r'主演:\s*(.*)$', credits)
        if actors_match:
            actors = actors_match.group(1).strip()

        # Second line: "year / region / genre".  Strip tags (e.g. the
        # trailing </p>) BEFORE splitting — this replaces the old
        # `[:-2]` hack, which removed only 2 of the 4 characters of
        # "</p>" and left "</" glued to the genre.
        if len(parts) > 1:
            tail = re.sub(r'\s+', ' ', re.sub(r'<[^>]+>', '', parts[-1])).strip()
            segments = [seg.strip() for seg in tail.split('/')]
            if len(segments) >= 3:
                year = segments[0]
                region = segments[1]
                genre = segments[2]

    return {
        'director': director,
        'actors': actors,
        'year': year,
        'region': region,
        'genre': genre,
    }

def _extract_movie(item):
    """Parse one ``<div class="item">`` node into a flat movie dict.

    Returns the dict, or ``None`` when the node has no info section.
    Raises on unexpected markup (e.g. a missing pic div); the caller
    catches the exception and skips the item.
    """
    # Poster block: rank badge, detail link, poster image.
    pic_div = item.find('div', class_='pic')
    rank_em = pic_div.find('em')  # AttributeError here skips the item
    rank = rank_em.text if rank_em else 'N/A'
    link_a = pic_div.find('a')
    link = link_a['href'] if link_a else 'N/A'
    img = pic_div.find('img')
    image = img['src'] if img else 'N/A'

    info_div = item.find('div', class_='info')
    if not info_div:
        return None

    hd_div = info_div.find('div', class_='hd')
    bd_div = info_div.find('div', class_='bd')

    # Title(s): the Chinese title plus any alternate-language titles.
    title_spans = hd_div.find_all('span', class_='title') if hd_div else []
    title = ''.join(t.text.strip() for t in title_spans) if title_spans else 'N/A'

    # Alternate names shown in grey after the title.
    other_span = hd_div.find('span', class_='other') if hd_div else None
    other_title = other_span.text.strip() if other_span else ''

    # Rating value and vote count live in the star block.
    star_div = bd_div.find('div', class_='star') if bd_div else None
    rating_span = star_div.find('span', class_='rating_num') if star_div else None
    rating = rating_span.text.strip() if rating_span else 'N/A'

    rating_people = 'N/A'
    people_spans = star_div.find_all('span') if star_div else []
    if people_spans and len(people_spans) > 1:
        # The last span holds e.g. "123456人评价"; keep digits only.
        rating_people = people_spans[-1].text.strip().replace('人评价', '')

    # One-line tagline quote (may be absent on some entries).
    quote_p = bd_div.find('p', class_='quote') if bd_div else None
    quote_span = quote_p.find('span') if quote_p else None
    quote = quote_span.text.strip() if quote_span else ''

    # Credits/year/region/genre: pass the raw <p> HTML so the <br>
    # separator survives for parse_movie_info to split on.
    info_p = bd_div.find('p') if bd_div else None
    parsed_info = parse_movie_info(str(info_p) if info_p else '')

    return {
        'rank': rank,
        'title': title,
        'other_title': other_title,
        'director': parsed_info['director'],
        'actors': parsed_info['actors'],
        'year': parsed_info['year'],
        'region': parsed_info['region'],
        'genre': parsed_info['genre'],
        'rating': rating,
        'rating_people': rating_people,
        'link': link,
        'image': image,
        'quote': quote,
    }


def scrape_douban_movies():
    """Scrape all 10 pages of Douban Movie Top250.

    Returns:
        list[dict]: one dict per movie (see ``_extract_movie`` for keys).
        Pages or items that fail to parse are logged and skipped, so the
        result may contain fewer than 250 entries.
    """
    movies = []

    # 10 pages, 25 movies each: start = 0, 25, ..., 225.
    for page in range(10):
        url = url_template.format(page * 25)
        print(f"正在爬取第 {page+1} 页: {url}")

        try:
            response = requests.get(url, headers=headers, timeout=10)
            response.encoding = 'utf-8'

            if response.status_code != 200:
                print(f"请求失败，状态码: {response.status_code}")
                continue

            soup = BeautifulSoup(response.text, 'html.parser')
            movie_items = soup.find_all('div', class_='item')

            if not movie_items:
                # An empty result on a 200 page usually means Douban
                # served a captcha/anti-bot page instead of the list.
                print("警告：未找到电影项目，可能是触发了反爬虫机制")
                print("建议稍后再试，或者手动检查页面内容")

            for item in movie_items:
                try:
                    movie_info = _extract_movie(item)
                except Exception as e:
                    print(f"处理单个电影项目时出错: {e}")
                    continue
                if movie_info is None:
                    continue
                movies.append(movie_info)
                print(f"已获取电影: {movie_info['title']}")

            # Throttle between pages to avoid tripping rate limits.
            time.sleep(3)

        except Exception as e:
            print(f"处理页面时出错: {e}")
            # Back off a little longer after a failure.
            time.sleep(5)
            continue

    return movies

def save_to_csv(movies, filename='douban_top250.csv'):
    """Write the scraped movie dicts to a CSV file.

    Args:
        movies: list of dicts as produced by ``scrape_douban_movies``.
        filename: output path; defaults to 'douban_top250.csv'.

    The file is written with a UTF-8 BOM ('utf-8-sig') so that Excel
    detects the encoding and renders Chinese text correctly.
    """
    fieldnames = ['rank', 'title', 'other_title', 'director', 'actors', 'year',
                  'region', 'genre', 'rating', 'rating_people', 'link', 'image', 'quote']

    with open(filename, 'w', newline='', encoding='utf-8-sig') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(movies)

    # Fixed: this previously printed the literal text "(unknown)"
    # instead of the actual output path.
    print(f"数据已保存至 {filename}")

if __name__ == "__main__":
    print("开始爬取豆瓣电影Top250...")
    movies = scrape_douban_movies()
    print(f"共获取 {len(movies)} 部电影信息")
    save_to_csv(movies)
    print("爬取完成!")