import requests
import time
import random
import re
import csv
import os
from bs4 import BeautifulSoup

# Module-level globals shared by all crawler functions.
# Browser-like request headers; the Referer helps avoid Douban's
# basic anti-scraping checks.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Referer': 'https://movie.douban.com/',
    'Connection': 'keep-alive'
}
# Top250 list page; paginated via ?start=N, 25 movies per page.
base_url = 'https://movie.douban.com/top250'
# Short-comments page template: .format(movie_id, start_offset), 20 per page.
comment_url = 'https://movie.douban.com/subject/{}/comments?start={}&limit=20&status=P&sort=new_score'


def get_page(url):
    """Fetch *url* and return the response body text, or None on failure.

    Returns None both for non-200 status codes and for network errors
    (connection failures, timeouts); errors are printed, never raised.
    """
    try:
        # Fix: a timeout is required — without one, requests.get can block
        # forever on a stalled connection and hang the whole crawl.
        response = requests.get(url, headers=headers, timeout=10)
        if response.status_code == 200:
            return response.text
        return None
    except Exception as e:
        # Broad catch is intentional: any fetch failure degrades to None
        # so the caller can skip the page and keep crawling.
        print(f"Error fetching page {url}: {e}")
        return None


def parse_movie_list(page):
    """Parse one Top250 list page into a list of movie-info dicts.

    Each dict carries rank, title, url, id, rating, and (when present)
    director_actors, year_country_type, year, and quote. A movie entry
    that fails to parse is reported and skipped.
    """
    soup = BeautifulSoup(page, 'html.parser')
    results = []
    for entry in soup.select('ol.grid_view li'):
        try:
            detail_url = entry.select_one('.hd a')['href']
            info = {
                'rank': entry.select_one('.pic em').text,
                'title': entry.select_one('.title').text,
                'url': detail_url,
                # The numeric subject id is embedded in the detail URL.
                'id': re.search(r'subject/(\d+)/', detail_url).group(1),
                'rating': float(entry.select_one('.rating_num').text),
            }

            meta_lines = entry.select_one('.bd p').text.strip().split('\n')
            if len(meta_lines) >= 2:
                info['director_actors'] = meta_lines[0].strip()
                info['year_country_type'] = meta_lines[1].strip()
                # Pull the 4-digit release year out of the metadata line.
                found_year = re.search(r'(\d{4})', info['year_country_type'])
                info['year'] = int(found_year.group(1)) if found_year else None

            quote_tag = entry.select_one('.quote .inq')
            info['quote'] = quote_tag.text if quote_tag else ""

            results.append(info)
        except Exception as e:
            # Skip malformed entries rather than abort the whole page.
            print(f"Error parsing movie: {e}")
    return results


def get_comments(movie_id, count=100):
    """Fetch up to *count* short comments for the movie with *movie_id*.

    Pages through the comments endpoint 20 at a time, sleeping between
    requests. Stops early once enough comments are collected or a page
    comes back short (last page). Returns a list of comment dicts with
    keys: author, rating, time, content.
    """
    collected = []
    for offset in range(0, count, 20):
        html = get_page(comment_url.format(movie_id, offset))
        if not html:
            # Failed fetch: skip this page, try the next offset.
            continue

        items = BeautifulSoup(html, 'html.parser').select('.comment-item')

        for node in items:
            try:
                rating_tag = node.select_one('.rating')
                collected.append({
                    'author': node.select_one('.comment-info a').text,
                    'rating': rating_tag['title'] if rating_tag else "无评分",
                    'time': node.select_one('.comment-time').text.strip(),
                    'content': node.select_one('.short').text.strip(),
                })
            except Exception as e:
                # Skip malformed comments; keep the rest of the page.
                print(f"Error parsing comment: {e}")

        # Stop when we have enough, or the page was short (no more pages).
        if len(collected) >= count or len(items) < 20:
            break

        # Random delay between comment pages to stay polite.
        time.sleep(random.uniform(3, 5))
    return collected[:count]


def crawl_movies():
    """Crawl Douban's Top 100 movies and their short comments.

    Output files, all written under ./douban_data:
      - movies.csv               one row per movie (basic info)
      - comments_<title>.csv     up to 100 comments per movie
      - all_comments.csv         every comment, tagged with its movie
    """
    # Ensure the output directory exists (idempotent, no race on re-run).
    os.makedirs('douban_data', exist_ok=True)

    all_movies = []
    all_comments = []

    # Crawl the movie list: 4 pages x 25 movies = top 100.
    for page in range(4):
        url = f"{base_url}?start={page * 25}&filter="
        page_content = get_page(url)
        if page_content:
            movies = parse_movie_list(page_content)
            all_movies.extend(movies)
            print(f"已爬取第{page + 1}页电影列表，当前共{len(all_movies)}部电影")
            time.sleep(random.uniform(1, 3))

    # Save basic movie info.
    with open('douban_data/movies.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['rank', 'title', 'id', 'rating', 'director_actors',
                                               'year_country_type', 'year', 'quote', 'url'])
        writer.writeheader()
        writer.writerows(all_movies)

    # Crawl the comments of every movie.
    for movie in all_movies:
        movie_id = movie['id']
        movie_title = movie['title']
        print(f"正在爬取电影《{movie_title}》的评论...")

        comments = get_comments(movie_id, 100)

        # Fix: sanitize the title before using it as a filename — titles may
        # contain characters that are illegal in paths (e.g. '/' or ':').
        safe_title = re.sub(r'[\\/:*?"<>|]', '_', movie_title)
        filename = f"douban_data/comments_{safe_title}.csv"
        with open(filename, 'w', encoding='utf-8', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=['author', 'rating', 'time', 'content'])
            writer.writeheader()
            writer.writerows(comments)

        # Tag each comment with its movie's info for the combined file.
        for comment in comments:
            comment['movie_title'] = movie_title
            comment['movie_id'] = movie_id
            comment['movie_rating'] = movie['rating']

        all_comments.extend(comments)

        print(f"已保存电影《{movie_title}》的{len(comments)}条评论")
        time.sleep(random.uniform(3, 5))

    # Save all comments to a single file.
    # Fix: this statement was truncated to "with ope" in the original,
    # which made the entire module fail to import with a SyntaxError.
    with open('douban_data/all_comments.csv', 'w', encoding='utf-8', newline='') as f:
        writer = csv.DictWriter(f, fieldnames=['movie_title', 'movie_id', 'movie_rating',
                                               'author', 'rating', 'time', 'content'])
        writer.writeheader()
        writer.writerows(all_comments)
    print("数据已保存到 douban_data 目录")


if __name__ == "__main__":
    # Fix: the original called main(), which is never defined anywhere in
    # this file — the crawler entry point is crawl_movies().
    crawl_movies()