import csv
import gzip
import random
import re
import sys
import time
import urllib.error
import urllib.request
import zlib
from urllib.request import Request

# Build realistic-looking HTTP request headers
def generate_headers():
    """Return browser-like HTTP headers with a random User-Agent and cookies.

    Returns:
        dict: Header name -> value mapping, including a randomized
        ``User-Agent`` and freshly generated Douban-style ``bid`` /
        ``dbcl2`` cookie values so consecutive requests look distinct.

    Note:
        ``Accept-Encoding`` is deliberately ``identity``.  The previous
        value (``gzip, deflate, br``) invited compressed responses, but
        the caller reads the body with ``urllib`` and decodes it as text
        directly — a gzip/brotli payload would decode to garbage.
    """
    # Pool of real-world desktop browser User-Agent strings
    user_agents = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.6 Safari/605.1.15',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/109.0.1518.78 Safari/537.36'
    ]

    def generate_bid():
        # Douban's "bid" cookie looks like an 11-char hex token
        import hashlib
        import time
        return hashlib.md5(str(time.time() + random.randint(1, 1000000)).encode()).hexdigest()[:11]

    def generate_dbcl2():
        # Fake "dbcl2" login cookie: 24 random chars wrapped in double quotes
        chars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*()_+-='
        return '"' + ''.join(random.choice(chars) for _ in range(24)) + '"'

    headers = {
        'User-Agent': random.choice(user_agents),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
        # BUGFIX: was 'gzip, deflate, br' — the response body was never
        # decompressed, so compressed pages became undecodable binary.
        'Accept-Encoding': 'identity',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Sec-Fetch-Dest': 'document',
        'Sec-Fetch-Mode': 'navigate',
        'Sec-Fetch-Site': 'same-origin',
        'Sec-Fetch-User': '?1',
        'Cache-Control': 'max-age=0',
        'TE': 'trailers',
        'Referer': 'https://www.douban.com/',
        # Randomized cookie string so requests don't share a fingerprint
        'Cookie': f'bid={generate_bid()}; dbcl2={generate_dbcl2()}; __utmc=30149280; __utmz=30149280.1678900000.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); _vwo_uuid_v2=ABCDEFG123456789; __yadk_uid=abcdef123456789;'
    }

    return headers

# Accumulator for scraped movie rows; mutated by scrape_douban_top250()
# and read by save_to_csv().  Each entry is a 9-field list:
# [title, director, actors, year, country, genre, rating, rating_people, quote]
movies = []

# Fetch a page's HTML (hardened against anti-scraping measures)
def get_page_content(url, max_retries=5):
    """Download *url* and return its body decoded to text.

    Args:
        url: Absolute URL to fetch.
        max_retries: Maximum number of attempts before giving up.

    Returns:
        str | None: The decoded page body, or ``None`` when every
        attempt fails.

    Retries with randomized back-off; HTTP 403 and 429 get longer waits.
    For Top-250 list pages the Douban homepage is visited first to
    "warm up" the session.
    """
    retries = 0

    # Visit the homepage first so the list request looks like a real session
    if 'top250' in url:
        print(f"  正在预热会话...")
        try:
            home_url = 'https://www.douban.com/'
            home_headers = generate_headers()
            home_req = Request(home_url, headers=home_headers)
            with urllib.request.urlopen(home_req, timeout=10) as response:
                # Read (and discard) the homepage body
                response.read()
            print(f"  会话预热完成")
            # Random pause so the follow-up request looks human
            time.sleep(random.uniform(2, 4))
        except Exception as e:
            print(f"  会话预热失败: {str(e)}")

    while retries < max_retries:
        try:
            # Fresh randomized headers on every attempt
            headers = generate_headers()

            req = Request(url, headers=headers)

            print(f"  正在请求页面: {url}")

            with urllib.request.urlopen(req, timeout=20) as response:
                if response.status != 200:
                    print(f"  页面返回非200状态码: {response.status}")
                    retries += 1
                    time.sleep(random.uniform(4, 8))
                    continue

                content = response.read()

                # BUGFIX: honour Content-Encoding — urllib does not
                # decompress automatically, so a gzip/deflate body
                # previously reached the text decoder as raw binary.
                content_encoding = (response.getheader('Content-Encoding') or '').lower()
                if content_encoding == 'gzip':
                    content = gzip.decompress(content)
                elif content_encoding == 'deflate':
                    try:
                        content = zlib.decompress(content)
                    except zlib.error:
                        # Some servers send raw deflate without zlib header
                        content = zlib.decompress(content, -zlib.MAX_WBITS)

                # Work out the charset from Content-Type, stripping any
                # trailing parameters (the old split kept them, which
                # could raise an uncaught LookupError in decode()).
                content_type = response.getheader('Content-Type', '')
                encoding = 'utf-8'
                if 'charset=' in content_type:
                    encoding = content_type.split('charset=')[-1].split(';')[0].strip()

                # Decode, falling back through the encodings this site uses
                for candidate in (encoding, 'utf-8', 'gbk'):
                    try:
                        return content.decode(candidate)
                    except (UnicodeDecodeError, LookupError):
                        continue
                # Last resort: latin-1 never raises
                return content.decode('latin-1')

        except urllib.error.HTTPError as e:
            retries += 1
            if e.code == 403:
                print(f"  获取页面被拒绝(403): {url}，正在尝试第{retries}/{max_retries}次重试...")
                # Wait much longer after a 403 before retrying
                sleep_time = random.uniform(10, 15)
                print(f"  等待 {sleep_time:.2f} 秒后重试...")
                time.sleep(sleep_time)
            elif e.code == 429:
                print(f"  请求过于频繁(429): {url}，正在尝试第{retries}/{max_retries}次重试...")
                # Rate-limited: back off even longer
                sleep_time = random.uniform(15, 20)
                print(f"  等待 {sleep_time:.2f} 秒后重试...")
                time.sleep(sleep_time)
            else:
                print(f"  获取页面HTTP错误 {e.code}: {url}，正在尝试第{retries}/{max_retries}次重试...")
                time.sleep(random.uniform(5, 10))

        except urllib.error.URLError as e:
            retries += 1
            print(f"  获取页面URL错误: {url}, 错误: {str(e)}，正在尝试第{retries}/{max_retries}次重试...")
            time.sleep(random.uniform(5, 10))

        except Exception as e:
            retries += 1
            print(f"  获取页面失败: {url}, 错误: {str(e)}，正在尝试第{retries}/{max_retries}次重试...")
            time.sleep(random.uniform(5, 10))

    print(f"  达到最大重试次数({max_retries})，获取页面失败: {url}")
    return None

# Main scraping routine
def scrape_douban_top250():
    """Scrape all 10 pages of Douban's Top-250 movie chart.

    Appends one ``[title, director, actors, year, country, genre,
    rating, rating_people, quote]`` row per movie to the module-level
    ``movies`` list and writes a checkpoint CSV after each page so an
    interrupted run loses at most one page of work.  Pages that fail to
    download are logged and skipped.
    """
    print("开始抓取豆瓣电影TOP250...")
    print("温馨提示：为避免触发豆瓣的反爬机制，程序会在请求之间添加适当延迟")
    
    # 10 pages, 25 movies per page
    for page in range(10):
        start = page * 25
        url = f'https://movie.douban.com/top250?start={start}&filter='
        print(f"\n正在抓取第{page+1}/10页...")
        
        # Random delay between pages to avoid detection
        if page > 0:
            sleep_time = random.uniform(5, 10)
            print(f"  页面切换延迟 {sleep_time:.2f} 秒...")
            time.sleep(sleep_time)
        
        # Download the list page
        content = get_page_content(url, max_retries=5)  # extra retries for list pages
        if not content:
            print(f"  第{page+1}页获取失败，跳过")
            time.sleep(random.uniform(5, 8))
            continue
        
        try:
            # Primary pattern: each movie's <div class="info"> container
            movie_containers = re.findall(r'<div class="info">(.+?)</div>\s*</div>', content, re.DOTALL)
            
            if not movie_containers:
                # Fallback pattern in case the page layout differs
                movie_containers = re.findall(r'<div class="item">(.+?)</div>', content, re.DOTALL)
            
            print(f"  找到 {len(movie_containers)} 部电影")
            
            for i, item in enumerate(movie_containers):
                try:
                    # Title
                    title_match = re.search(r'<span class="title">([^<]+)</span>', item)
                    title = title_match.group(1) if title_match else '未知'
                    
                    # Rating
                    rating_match = re.search(r'<span class="rating_num"[^>]*>([^<]+)</span>', item)
                    rating_num = rating_match.group(1) if rating_match else '未知'
                    
                    # Number of raters
                    people_match = re.search(r'<span>(\d+)人评价</span>', item)
                    rating_people = f"{people_match.group(1)}人评价" if people_match else '未知'
                    
                    # One-line quote
                    quote_match = re.search(r'<span class="inq">([^<]+)</span>', item)
                    quote = quote_match.group(1) if quote_match else '无短评'
                    
                    # Detail-page URL (first link in the block)
                    link_match = re.search(r'<a href="([^"]+)"', item)
                    detail_link = link_match.group(1) if link_match else ''
                    
                    # Fetch extra fields from the detail page
                    director, actors, year, country, genre = get_movie_details(detail_link)
                    
                    # Accumulate the finished row
                    movies.append([title, director, actors, year, country, genre, rating_num, rating_people, quote])
                    
                    # Progress output
                    print(f"  {i+1}. 已抓取: {title} - {rating_num}")
                    
                    # Random per-movie delay to stay under the radar
                    time.sleep(random.uniform(2, 4))
                    
                    # Longer break after every 5 movies
                    if (i+1) % 5 == 0:
                        extra_sleep = random.uniform(3, 6)
                        print(f"  休息 {extra_sleep:.2f} 秒...")
                        time.sleep(extra_sleep)
                        
                except Exception as e:
                    print(f"  解析第{i+1}部电影时出错: {str(e)}")
                    # Brief pause after a parse failure
                    time.sleep(random.uniform(1, 2))
                    continue
                    
        except Exception as e:
            print(f"解析第{page+1}页时出错: {str(e)}")
            # Longer pause after a page-level failure
            time.sleep(random.uniform(5, 10))
        
        # Persist progress so an interrupted run keeps completed pages
        if movies:
            print(f"\n  已累计抓取 {len(movies)} 部电影")
            # Temporary checkpoint file
            try:
                with open('豆瓣电影TOP250_临时进度.csv', 'w', newline='', encoding='utf-8-sig') as f:
                    writer = csv.writer(f)
                    writer.writerow(['电影名称', '导演', '主演', '年份', '国别', '类型', '评分', '评分人数', '短评'])
                    writer.writerows(movies)
                print(f"  临时进度已保存")
            except Exception as e:
                print(f"  保存临时进度失败: {str(e)}")
# Fetch director / cast / year / country / genre from a movie's detail page
def get_movie_details(detail_link):
    """Scrape a Douban movie detail page for secondary fields.

    Args:
        detail_link: Absolute URL of the movie's detail page; may be
            empty or falsy, in which case no request is made.

    Returns:
        tuple[str, str, str, str, str]: ``(director, actors, year,
        country, genre)``.  Each field falls back to ``'未知'`` when it
        cannot be extracted.
    """
    unknown = ('未知', '未知', '未知', '未知', '未知')
    if not detail_link:
        return unknown

    try:
        # Download the detail page (shares the retrying fetcher)
        content = get_page_content(detail_link)
        if not content:
            return unknown

        # Director: first link inside the first "attrs" span
        director_match = re.search(r'<span class="attrs">\s*<a[^>]+>([^<]+)</a>', content)
        director = director_match.group(1).strip() if director_match else '未知'

        # Cast: capture the whole "attrs" span after the 主演 label, then
        # pull the anchor texts out of it.
        # BUGFIX: the old pattern captured [^<]+ (only the text before the
        # first <a>), so the link scan always came up empty and the field
        # silently became '' instead of the actor names.
        actors_match = re.search(r'主演</span>\s*<span class="attrs">(.*?)</span>', content, re.DOTALL)
        actors = '未知'
        if actors_match:
            actors_links = re.findall(r'<a[^>]+>([^<]+)</a>', actors_match.group(1))
            if actors_links:
                actors = ' / '.join(actors_links[:5])  # cap at 5 names

        # Year: 4-digit year of the first release date
        year_match = re.search(r'<span property="v:initialReleaseDate"[^>]+>(\d{4})', content)
        year = year_match.group(1) if year_match else '未知'

        # Country / region of production
        country_match = re.search(r'制片国家/地区:</span>\s*([^<]+)', content)
        country = country_match.group(1).strip() if country_match else '未知'

        # Genres: join every v:genre span (the old single-match search was
        # dead code, immediately superseded by this findall)
        genres = re.findall(r'<span property="v:genre">([^<]+)</span>', content)
        genre = ' / '.join(genres) if genres else '未知'

        return director, actors, year, country, genre

    except Exception as e:
        print(f"获取详情时出错: {detail_link}, 错误: {str(e)}")
        return unknown

# Persist the collected movie rows to a CSV file (openable in Excel)
def save_to_csv():
    """Write the module-level ``movies`` list to 豆瓣电影TOP250.csv.

    Uses UTF-8 with a BOM (``utf-8-sig``) so Excel detects the encoding
    and renders Chinese text correctly.  Prints a notice and does
    nothing when no movies were scraped.
    """
    if not movies:
        print("没有抓取到电影数据！")
        return

    print(f"共抓取到{len(movies)}部电影，开始保存到CSV文件...")
    # Output path for the final export
    csv_file = '豆瓣电影TOP250.csv'

    header_row = ['电影名称', '导演', '主演', '年份', '国别', '类型', '评分', '评分人数', '短评']
    try:
        # utf-8-sig prepends a BOM so Excel opens the file correctly
        with open(csv_file, 'w', newline='', encoding='utf-8-sig') as fh:
            out = csv.writer(fh)
            out.writerow(header_row)
            out.writerows(movies)
        print(f"CSV文件保存成功！文件路径：{csv_file}")
        print("提示：CSV文件可以直接用Excel打开查看和编辑。")
    except Exception as e:
        print(f"保存文件时出错: {str(e)}")

if __name__ == '__main__':
    # Entry point: run the crawl, then persist whatever was collected.
    print("===== 豆瓣电影TOP250爬虫 ====\n")
    print("本程序使用Python标准库开发，无需安装额外依赖\n")
    
    try:
        # Scrape all 10 pages into the global `movies` list
        scrape_douban_top250()
        
        # Write the final CSV export
        save_to_csv()
        
    except KeyboardInterrupt:
        print("\n程序被用户中断")
    except Exception as e:
        print(f"\n程序运行出错: {str(e)}")
        print("提示：如果遇到网络问题，可以尝试再次运行程序")
    finally:
        print("\n程序结束")