import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
from tqdm import tqdm
import http.cookiejar
import logging

# Logging configuration: timestamped INFO-level messages for the whole script
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Crawler for the Douban Movie Top250 listing pages.


def _fetch_with_retries(session, url, base_headers, user_agents, max_retries=3):
    """Fetch *url* with a randomized User-Agent, retrying on failure.

    Args:
        session: requests.Session carrying cookies across requests.
        url: page URL to fetch.
        base_headers: common header template (copied per attempt).
        user_agents: pool of User-Agent strings to rotate through.
        max_retries: maximum number of attempts before giving up.

    Returns:
        The successful Response (status 200), or None if every attempt failed.
    """
    retry_count = 0
    while retry_count < max_retries:
        try:
            # Rotate the User-Agent on every attempt to look less bot-like.
            headers = base_headers.copy()
            headers['User-Agent'] = random.choice(user_agents)

            logger.info(f"正在抓取页面: {url}")
            response = session.get(url, headers=headers, timeout=30)
            response.encoding = 'utf-8'

            if response.status_code == 200:
                return response
            print(f"页面 {url} 请求失败，状态码：{response.status_code}")
        except Exception as e:
            print(f"抓取页面 {url} 时出错: {e}")

        retry_count += 1
        if retry_count < max_retries:
            print(f"第 {retry_count} 次重试...")
            # Sleep longer before a retry to ease off the server.
            time.sleep(random.uniform(5, 10))

    print(f"页面 {url} 在 {max_retries} 次重试后仍然失败，跳过该页面")
    return None


def _parse_movie_item(item):
    """Parse one <li> movie entry from the Top250 list into a dict.

    Args:
        item: a BeautifulSoup Tag for one ``ol.grid_view li`` element.

    Returns:
        Dict with Chinese-keyed fields (title, director, actors, year,
        country, genre, rating, rating count, quote). Missing fields
        default to the empty string.
    """
    # Titles: first span.title is the Chinese title, second (if any) the
    # foreign title prefixed with "/ ".
    title_tags = item.select('span.title')
    title_cn = title_tags[0].text.strip() if title_tags else ""
    title_en = ""
    if len(title_tags) > 1:
        title_en = title_tags[1].text.strip().replace('/ ', '')

    # Initialize up-front so no branch can leave these unbound.
    director = actors = year = country = genre = ""

    info_tag = item.select_one('div.bd p')
    if info_tag:
        info_lines = info_tag.text.strip().split('\n')

        # First line: "导演: X ... 主演: Y ..."
        if info_lines:
            director_actor_line = info_lines[0].strip()
            if '导演: ' in director_actor_line:
                director_part = director_actor_line.split('导演: ')[1]
                if '主演: ' in director_part:
                    director, _, actors = director_part.partition('主演: ')
                    director = director.strip()
                    actors = actors.strip()
                else:
                    director = director_part.strip()

        # Second line: "year / country / genre". BeautifulSoup decodes
        # &nbsp; into \xa0, so split on '/' after normalizing \xa0 —
        # splitting on the literal '&nbsp;/&nbsp;' would never match.
        if len(info_lines) > 1:
            parts = [p.strip() for p in
                     info_lines[1].strip().replace('\xa0', ' ').split('/')]
            if len(parts) > 0:
                year = parts[0]
            if len(parts) > 1:
                country = parts[1]
            if len(parts) > 2:
                genre = parts[2]

    # Numeric rating (e.g. "9.7").
    rating_tag = item.select_one('span.rating_num')
    rating = rating_tag.text.strip() if rating_tag else ""

    # Rating count, e.g. "2866126人评价".
    rating_people_tag = item.find('span', string=lambda text: text and '人评价' in text)
    rating_people = rating_people_tag.text.strip() if rating_people_tag else ""

    # One-line quote shown under the entry (may be absent).
    quote_tag = item.select_one('span.inq')
    quote = quote_tag.text.strip() if quote_tag else ""

    return {
        '电影名称(中文)': title_cn,
        '电影名称(外文)': title_en,
        '导演': director,
        '主演': actors,
        '年份': year,
        '国别': country,
        '类型': genre,
        '评分': rating,
        '评分人数': rating_people,
        '短评': quote,
    }


def crawl_douban_top250():
    """Crawl the Douban Movie Top250 list pages and return movie records.

    Fetches ``total_pages`` pages (25 movies each) with retries, parses
    every movie entry, and sleeps between pages to avoid anti-scraping
    measures. Pages that fail after all retries are skipped.

    Returns:
        List of dicts as produced by :func:`_parse_movie_item`.

    NOTE(fix): in the original, the whole parsing section sat under the
    ``if not success:`` branch after ``continue`` and was unreachable, so
    the function always returned an empty list.
    """
    # Session keeps cookies between requests.
    session = requests.session()
    session.cookies = http.cookiejar.LWPCookieJar()

    # Pool of realistic User-Agent strings, rotated per request.
    user_agents = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.2 Safari/605.1.15',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36 Edg/119.0.0.0',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/121.0',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36'
    ]

    # Common request headers, including a plausible Cookie and Referer.
    base_headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Connection': 'keep-alive',
        'Accept-Encoding': 'gzip, deflate, br',
        'Upgrade-Insecure-Requests': '1',
        'Cache-Control': 'max-age=0',
        'Referer': 'https://movie.douban.com/',
        'Cookie': 'll="108288"; bid=example_bid_value; __utma=111111111.1111111111.1111111111.1111111111.1111111111.1; __utmb=111111111.0.10.1111111111; __utmc=111111111; __utmz=111111111.1111111111.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utmv=30149280.11111; _vwo_uuid_v2=EXAMPLEUUID'
    }

    movies_list = []

    # Fetch only the first 2 pages (50 movies) to stay cautious; raise
    # this gradually once the crawl proves stable.
    total_pages = 2

    for page in tqdm(range(total_pages), desc="抓取进度"):
        start = page * 25  # 25 movies per page
        url = f"https://movie.douban.com/top250?start={start}&filter="

        response = _fetch_with_retries(session, url, base_headers, user_agents)
        if response is None:
            continue

        soup = BeautifulSoup(response.text, 'html.parser')
        movie_items = soup.select('ol.grid_view li')

        for item in movie_items:
            try:
                movies_list.append(_parse_movie_item(item))
            except Exception as e:
                logger.error(f"解析电影信息时出错: {e}")
                continue

        # Random sleep between pages to mimic a human reader.
        sleep_time = random.uniform(8, 15)
        logger.info(f"完成一页处理，休眠 {sleep_time:.2f} 秒")
        time.sleep(sleep_time)

    return movies_list

# Persist scraped data to an Excel workbook.
def save_to_excel(movies_list, filename="豆瓣电影Top250.xlsx"):
    """Write *movies_list* (a list of per-movie dicts) to an Excel file.

    Args:
        movies_list: rows to save; dict keys become column headers.
        filename: output path for the .xlsx file.

    Prints a message and returns without writing when the list is empty.
    Write errors are caught and reported rather than raised.
    """
    if not movies_list:
        print("没有抓取到数据，无法保存")
        return

    df = pd.DataFrame(movies_list)

    try:
        df.to_excel(filename, index=False, engine='openpyxl')
        # FIX: original printed the literal text "(unknown)" here instead
        # of interpolating the actual output filename.
        print(f"数据已成功保存到 {filename}")
        print(f"共抓取到 {len(movies_list)} 部电影信息")
    except Exception as e:
        print(f"保存Excel文件时出错: {e}")

# Script entry point.
def main():
    """Run the full pipeline: crawl the Top250 list, then save it to Excel."""
    logger.info("开始抓取豆瓣电影Top250信息...")
    started_at = time.time()

    # Crawl first, then persist whatever was collected.
    movies = crawl_douban_top250()
    save_to_excel(movies)

    elapsed = time.time() - started_at
    logger.info(f"抓取完成，总耗时: {elapsed:.2f} 秒")


if __name__ == "__main__":
    main()