import requests
from bs4 import BeautifulSoup
import csv
import time
import random

# Browser-like request headers so Douban does not reject the requests;
# the Referer mimics navigation from the "explore" page.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
    'Referer': 'https://movie.douban.com/explore'
}


def get_hot_movies():
    """
    Fetch the current list of hot movies from Douban.

    Calls Douban's AJAX search endpoint (the same one the "explore"
    page uses) to retrieve up to 20 currently popular movies.

    Returns:
        list[dict]: movie entries as returned by the API; each entry
        contains at least 'title' and 'url'. Returns an empty list on
        any network, HTTP, or parsing failure.
    """
    url = "https://movie.douban.com/j/search_subjects"
    params = {
        "type": "movie",
        "tag": "热门",           # the "hot" tag
        "sort": "recommend",
        "page_limit": "20",
        "page_start": "0"
    }

    try:
        # timeout keeps the script from hanging forever on a stalled connection
        response = requests.get(url, params=params, headers=headers, timeout=10)
    except requests.RequestException as e:
        print("请求失败，错误：", e)
        return []

    if response.status_code != 200:
        print("请求失败，状态码：", response.status_code)
        return []

    try:
        # Payload shape is {"subjects": [...]}; .get() guards against
        # a schema change, ValueError against a non-JSON body.
        return response.json().get('subjects', [])
    except ValueError as e:
        print("解析失败，错误：", e)
        return []


def get_movie_details(url):
    """
    Scrape the detail page of a single movie.

    Args:
        url: the movie's Douban detail-page URL.

    Returns:
        dict with Chinese keys ('标题', '评分', '导演', '演员', '类型',
        '上映日期', '简介'); missing fields are empty strings. Returns
        None when the request or the page parsing fails.
    """
    # Random delay to avoid triggering Douban's anti-scraping ban
    time.sleep(random.uniform(1, 3))

    try:
        # timeout keeps the script from hanging forever on a stalled connection
        response = requests.get(url, headers=headers, timeout=10)
    except requests.RequestException as e:
        print(f"请求失败，状态码：{e}，URL：{url}")
        return None

    if response.status_code != 200:
        print(f"请求失败，状态码：{response.status_code}，URL：{url}")
        return None

    soup = BeautifulSoup(response.text, 'html.parser')
    details = {}

    try:
        # Title: the <span> inside the page's <h1>; guard against a
        # missing <h1> (e.g. a captcha / error page) instead of letting
        # an AttributeError fall into the broad except below.
        h1 = soup.find('h1')
        title_element = h1.find('span') if h1 else None
        details['标题'] = title_element.text.strip() if title_element else ''

        # Score from the rating widget
        rating_element = soup.find('strong', class_='ll rating_num')
        details['评分'] = rating_element.text.strip() if rating_element else ''

        # Directors (possibly several), joined with '/'; join of an
        # empty list is already '' — no separate fallback needed.
        # findAll is the deprecated alias of find_all.
        directors = [a.text.strip() for a in soup.find_all('a', rel='v:directedBy')]
        details['导演'] = '/'.join(directors)

        # Main cast (possibly several)
        actors = [a.text.strip() for a in soup.find_all('a', rel='v:starring')]
        details['演员'] = '/'.join(actors)

        # Genres (possibly several)
        genres = [g.text.strip() for g in soup.find_all('span', property='v:genre')]
        details['类型'] = '/'.join(genres)

        # First listed release date (there may be one per region)
        date_element = soup.find('span', property='v:initialReleaseDate')
        details['上映日期'] = date_element.text.strip() if date_element else ''

        # Synopsis, with all runs of whitespace/newlines collapsed to
        # single spaces
        summary_element = soup.find('span', property='v:summary')
        if summary_element:
            details['简介'] = ' '.join(summary_element.text.split())
        else:
            details['简介'] = ''

    except Exception as e:
        print(f"解析失败: {url}，错误：{str(e)}")
        return None

    return details


def save_to_csv(data, filename='douban_movies.csv'):
    """
    Write the collected movie records to a CSV file.

    Args:
        data: list of dicts keyed by the Chinese field names below;
            keys missing from a record are written as empty strings,
            extra keys are ignored.
        filename: output path; defaults to 'douban_movies.csv' so
            existing callers are unaffected.

    The file uses utf-8-sig encoding so Excel detects the BOM and
    renders the Chinese headers correctly.
    """
    # Column order of the Chinese headers in the output file
    fieldnames = ['标题', '评分', '导演', '演员', '类型', '上映日期', '简介', '链接']

    with open(filename, 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        for movie in data:
            # Restrict each row to the known columns, defaulting to ''
            writer.writerow({key: movie.get(key, '') for key in fieldnames})


def main():
    """
    Script entry point.

    Workflow:
      1. fetch the list of currently hot movies;
      2. scrape each movie's detail page;
      3. persist the collected records to douban_movies.csv.
    """
    listing = get_hot_movies()
    if not listing:
        print("未获取到热门电影列表")
        return

    collected = []
    for entry in listing:
        print(f"正在爬取：{entry['title']}")
        info = get_movie_details(entry['url'])
        if info is None:
            continue
        # Attach the detail-page link so it ends up in the CSV
        info['链接'] = entry['url']
        collected.append(info)

    if not collected:
        print("未获取到有效电影信息")
        return

    save_to_csv(collected)
    print(f"成功爬取{len(collected)}部电影信息，已保存到douban_movies.csv")


# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()