import requests
import csv
from time import sleep
import re
from datetime import datetime
from bs4 import BeautifulSoup

# Movies to scrape: each entry pairs a Douban subject ID with a display name.
movies = [
    {'id': '36296619', 'name': '志愿军：浴血和平'},
    # Add more movies here, format: {'id': '<Douban subject ID>', 'name': '<movie title>'}
]

# Request headers sent with every page fetch: a desktop-Chrome User-Agent plus a
# logged-in Douban session cookie (needed to read past the first comment pages).
# NOTE(review): the hard-coded cookie is session data and will expire — refresh
# it from a logged-in browser before running.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36",
    "cookie": 'll="118252"; bid=RP1pKdPBZxw; _pk_id.100001.4cf6=af45169916d06554.1761901829.; __yadk_uid=UjH5QYbn8k14N6g9HH2dXljRqWu3Ww11; _vwo_uuid_v2=D748601678751647988003FADD42FF160|4e738d89ddc907b652244bb9834017a8; dbcl2="292055264:QXIfeyyaulw"; push_noty_num=0; push_doumail_num=0; ck=aBj2; frodotk_db="59e4bca4333ab2a00dae35ec99b017e0"; __utmc=30149280; __utmv=30149280.29205; __utmc=223695111; __utmz=30149280.1762241735.3.3.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utmz=223695111.1762241745.3.3.utmcsr=douban.com|utmccn=(referral)|utmcmd=referral|utmcct=/; ap_v=0,6.0; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1762262993%2C%22https%3A%2F%2Fwww.douban.com%2F%22%5D; _pk_ses.100001.4cf6=1; __utma=30149280.1143164688.1761901829.1762256698.1762262993.6; __utmb=30149280.0.10.1762262993; __utma=223695111.997674525.1761901829.1762256713.1762262993.6; __utmb=223695111.0.10.1762262993'
}


def parse_date(date_str):
    """Normalize a Douban date string to 'YYYY-MM-DD HH:MM:SS'.

    Douban comment timestamps appear either as 'YYYY-MM-DD' or as
    'YYYY-MM-DD HH:MM:SS'; a bare date is padded with 00:00:00.

    Returns:
        The normalized timestamp string; '未知时间' for empty/non-string
        input; the original string unchanged when it matches neither format.
    """
    if not date_str or not isinstance(date_str, str):
        return "未知时间"

    cleaned = date_str.strip()  # page text can carry surrounding whitespace
    try:
        # More than 10 characters means the time-of-day part is present.
        fmt = '%Y-%m-%d %H:%M:%S' if len(cleaned) > 10 else '%Y-%m-%d'
        return datetime.strptime(cleaned, fmt).strftime('%Y-%m-%d %H:%M:%S')
    except ValueError:
        # FIX: was a bare `except:` (which would also swallow KeyboardInterrupt
        # etc.); strptime signals a format mismatch with ValueError only.
        # Keep the raw text rather than losing data on unknown formats.
        return date_str


def extract_rating(comment_item):
    """Extract a star rating ('1'..'5') from a comment's info element.

    Douban encodes ratings as a span whose CSS class is 'allstarN0'
    (e.g. 'allstar50' means 5 stars).

    Returns:
        The star count as a string, or '无评分' when no rating span exists
        or its class cannot be parsed.
    """
    rating_span = comment_item.find('span', class_=re.compile(r'allstar(\d+)0'))
    # FIX: was `if rating_span:`. The rating span is typically EMPTY
    # (<span class="allstar50" ...></span>), and an empty bs4 Tag is falsy
    # (len(contents) == 0), so real ratings were silently discarded.
    # bs4 documentation says to test find() results with `is None`.
    if rating_span is None:
        return '无评分'
    for cls in rating_span.get('class', []):
        # FIX: was `startswith('allstar')` + int() inside a bare except;
        # fullmatch rejects malformed classes without needing to catch
        # ValueError at all.
        match = re.fullmatch(r'allstar(\d+)', cls)
        if match:
            return str(int(match.group(1)) // 10)  # allstar50 -> 5 stars
    return '无评分'


def get_all_comments():
    """Scrape short comments for every movie in `movies` into a CSV file.

    Writes douban_movie_comments.csv (UTF-8 with BOM so Excel opens it
    correctly), one row per comment. Pages through each movie's comment list
    until a page comes back empty, a 403 suggests the anti-scraping wall,
    or the per-movie page cap is reached.
    """
    max_pages = 20   # cap pages per movie to avoid triggering anti-scraping
    page_size = 20   # Douban serves 20 comments per page

    with open('douban_movie_comments.csv', 'w', newline='', encoding='utf-8-sig') as csvfile:
        writer = csv.writer(csvfile)
        writer.writerow(['电影名称', '用户名', '评分', '评论内容', '评论时间', '有用数'])

        for movie in movies:
            movie_id = movie['id']
            movie_name = movie['name']

            print(f"\n开始爬取 {movie_name} 的评论...")
            comments_count = 0

            for page in range(max_pages):
                try:
                    start = page * page_size
                    url = f'https://movie.douban.com/subject/{movie_id}/comments?start={start}&limit=20&status=P&sort=new_score'

                    print(f"正在爬取 {movie_name} 第{page + 1}页...")

                    response = requests.get(url, headers=headers, timeout=15)

                    # FIX: the 403 check used to come AFTER raise_for_status(),
                    # which raises on any 4xx/5xx first — making the check
                    # unreachable. Test it first so a ban actually stops
                    # paging this movie instead of retrying every page.
                    if response.status_code == 403:
                        print("访问被拒绝，可能触发了反爬机制")
                        break
                    response.raise_for_status()

                    soup = BeautifulSoup(response.text, 'html.parser')
                    comment_items = soup.find_all('div', class_='comment-item')

                    # An empty page means we've walked past the last comment.
                    if not comment_items:
                        print("没有找到更多评论")
                        break

                    page_comments_count = process_comments(comment_items, movie_name, writer)
                    comments_count += page_comments_count

                    print(f"第{page + 1}页爬取到{page_comments_count}条评论")

                    sleep(2)  # throttle between pages to avoid getting banned

                except Exception as e:
                    # Best-effort: log the failed page and move on to the next.
                    print(f"第{page + 1}页爬取失败: {str(e)}")
                    continue

            print(f"{movie_name} 共爬取 {comments_count} 条评论")

    print("\n所有数据已保存到 douban_movie_comments.csv")


def process_comments(comment_items, movie_name, writer):
    """Parse each comment element and write one CSV row per comment.

    Args:
        comment_items: parsed 'comment-item' div elements from one page.
        movie_name: movie title placed in the first CSV column.
        writer: csv.writer the rows are appended to.

    Returns:
        Number of rows successfully written.
    """
    written = 0
    for item in comment_items:
        try:
            # The 'comment-info' span holds username, rating and timestamp;
            # without it there is nothing useful to record.
            info = item.find('span', class_='comment-info')
            if not info:
                continue

            user_link = info.find('a')
            username = user_link.text.strip() if user_link else '匿名用户'

            rating = extract_rating(info)

            time_span = info.find('span', class_='comment-time')
            raw_time = time_span.text.strip() if time_span else '未知时间'
            comment_time = parse_date(raw_time)

            short = item.find('span', class_='short')
            content = short.text.strip() if short else '无内容'

            votes = item.find('span', class_='votes')
            useful_count = votes.text.strip() if votes else '0'

            writer.writerow([movie_name, username, rating, content, comment_time, useful_count])
            written += 1

        except Exception as e:
            # Best-effort: a single malformed comment must not abort the page.
            print(f"处理单条评论时出错: {str(e)}")
            continue

    return written


# Entry point: run the scraper only when executed as a script, not on import.
if __name__ == '__main__':
    get_all_comments()