import requests
from bs4 import BeautifulSoup
import time
import random
import json
from fake_useragent import UserAgent


def get_douban_top250():
    """Scrape the Douban Top 250 movie list and save it to ``douban_top250.json``.

    Walks the 10 paginated list pages (25 movies each), extracting title,
    rating, link, poster image, quote and the first info line for every movie.
    Results are written as UTF-8 JSON to ``douban_top250.json`` in the current
    directory. Progress and failures are reported via ``print``.

    Anti-scraping measures used: a shared ``requests.Session``, a rotating
    random User-Agent per page, browser-like headers, plausible cookies with a
    randomized ``bid``, and a random 3-6s delay between requests.

    Returns:
        None. Side effects only (file write + console output).
    """
    # 1. Initial setup
    ua = UserAgent()
    session = requests.Session()
    all_movies = []

    # 2. Required request headers (browser-like, including Sec-Fetch-* hints)
    headers = {
        "User-Agent": ua.random,
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Accept-Language": "zh-CN,zh;q=0.9",
        "Referer": "https://www.douban.com/",
        "Connection": "keep-alive",
        "Host": "movie.douban.com",
        "Upgrade-Insecure-Requests": "1",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-User": "?1"
    }

    # 3. Cookies copied from a real browser session (important for anti-bot).
    #    `bid` is randomized so repeated runs do not reuse the same browser id.
    #    NOTE(review): the __utm* timestamps are stale placeholders — users are
    #    expected to replace them with live values (see get_real_cookie()).
    cookies = {
        'bid': ''.join(random.choices('abcdefghijklmnopqrstuvwxyz1234567890', k=11)),
        'douban-fav-remind': '1',
        'll': '"118282"',
        '__utmz': '30149280.1689234567.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none)',
        '__utmc': '30149280',
        '__utma': '30149280.1234567890.1689234567.1689234567.1689234567.1',
        '__utmb': '30149280.0.10.1689234567'
    }

    # 4. Fetch page by page (start=0, 25, ..., 225)
    for start in range(0, 250, 25):
        url = f"https://movie.douban.com/top250?start={start}"
        print(f"正在处理: {url}")

        try:
            # Random delay + rotate the User-Agent before every request
            time.sleep(random.uniform(3, 6))
            headers["User-Agent"] = ua.random

            # Reuse the session so cookies set by the server persist
            response = session.get(url, headers=headers, cookies=cookies, timeout=10)

            # Validate the response before parsing
            if response.status_code != 200:
                print(f"请求失败，状态码: {response.status_code}")
                continue

            if "检测到有异常请求" in response.text:
                print("触发反爬机制！请更新Cookie或使用代理")
                break

            # 5. Parse the movie list items
            soup = BeautifulSoup(response.text, 'html.parser')
            items = soup.select('.grid_view li')

            if not items:
                print("未解析到电影数据，可能是页面结构变化")
                # BUGFIX: previously sliced the *list of tags* ([:200] elements),
                # which is useless for debugging; show the first 200 characters
                # of the raw HTML instead.
                print("调试信息：", response.text[:200])
                break

            for item in items:
                try:
                    data = {
                        'title': item.select_one('.title').text.strip(),
                        'rating': item.select_one('.rating_num').text.strip(),
                        'link': item.select_one('a')['href'],
                        'img': item.select_one('img')['src'],
                        'quote': item.select_one('.inq').text if item.select_one('.inq') else None,
                        'info': item.select_one('.bd p').text.strip().split('\n')[0].strip()
                    }
                    all_movies.append(data)
                except (AttributeError, KeyError, TypeError) as e:
                    # select_one() returns None for missing nodes (AttributeError
                    # on .text), and ['href']/['src'] raise KeyError/TypeError.
                    print(f"解析单个电影时出错: {e}")
                    continue

        except requests.RequestException as e:
            # Network-level failures (timeout, connection error, bad redirect)
            print(f"处理 {url} 时出错: {e}")
            continue

    # 6. Save the results (only if we got anything)
    if all_movies:
        with open('douban_top250.json', 'w', encoding='utf-8') as f:
            json.dump(all_movies, f, ensure_ascii=False, indent=2)
        print(f"成功获取 {len(all_movies)} 部电影数据")
    else:
        print("未能获取任何数据，请检查：")
        print("1. Cookie是否有效")
        print("2. 是否触发反爬")
        print("3. 页面结构是否变化")


def get_real_cookie():
    """指导用户获取真实Cookie"""
    print("\n如何获取有效Cookie：")
    print("1. 使用Chrome浏览器登录豆瓣账号")
    print("2. 访问 https://movie.douban.com/top250")
    print("3. 按F12打开开发者工具")
    print("4. 切换到Network选项卡")
    print("5. 刷新页面，点击第一个请求")
    print("6. 在Headers中找到Cookie值")
    print("7. 替换代码中的cookies字典")


def _main():
    """Script entry point: show cookie instructions, then run the scraper."""
    get_real_cookie()
    get_douban_top250()


if __name__ == '__main__':
    _main()