import requests
from bs4 import BeautifulSoup
import csv
import time
import os
import re
import random
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

class IMDBCrawler:
    """
    IMDb电影信息爬虫类
    用于从IMDb网站爬取电影信息，包括基本信息、海报等
    """
    def __init__(self):
        """
        Initialize the crawler.

        Builds a retry-capable HTTP session with browser-like headers,
        configures input/output paths, creates the poster directory, and
        warms up the session by visiting the IMDb home page for cookies.
        """
        # Persistent session: keeps cookies and reuses TCP connections
        self.session = requests.Session()
        
        # Retry policy for transient server / rate-limit errors
        retry_strategy = Retry(
            total=3,  # maximum retry attempts
            backoff_factor=1,  # exponential backoff multiplier between retries
            status_forcelist=[429, 500, 502, 503, 504],  # status codes worth retrying
            allowed_methods=["GET"]  # only retry idempotent GETs
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        self.session.mount("https://", adapter)
        
        # Browser-like headers (Chrome 119 on Windows) to reduce bot blocking
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Connection': 'keep-alive',
            'Upgrade-Insecure-Requests': '1',
            'Cache-Control': 'max-age=0',
            'Sec-Ch-Ua': '"Google Chrome";v="119", "Chromium";v="119", "Not?A_Brand";v="24"',
            'Sec-Ch-Ua-Mobile': '?0',
            'Sec-Ch-Ua-Platform': '"Windows"',
            'Sec-Fetch-Dest': 'document',
            'Sec-Fetch-Mode': 'navigate',
            'Sec-Fetch-Site': 'none',
            'Sec-Fetch-User': '?1',
        }
        
        # Apply the headers to every request made through the session
        self.session.headers.update(self.headers)
        
        # Base URL and file-system paths
        self.url = 'https://www.imdb.com/title/'  # base URL for IMDb title pages
        self.movie_csv_path = 'links.csv'  # input CSV listing the movie IDs
        self.poster_save_path = './poster'  # directory for downloaded posters
        self.info_save_path = './info.csv'  # output CSV for scraped movie info
        
        # Create the poster directory on first run
        if not os.path.exists(self.poster_save_path):
            os.makedirs(self.poster_save_path)
            print(f"创建海报保存目录: {self.poster_save_path}")
        
        # Abort early if the input ID file is missing.
        # NOTE(review): exit(1) in a constructor kills the whole process;
        # consider raising instead if this class is ever imported elsewhere.
        if not os.path.exists(self.movie_csv_path):
            print(f"错误: 链接文件 {self.movie_csv_path} 不存在!")
            print("请创建一个包含电影ID的CSV文件，格式为: 自定义ID,IMDbID")
            exit(1)
            
        # Warm up: visit the IMDb home page so the session picks up cookies
        try:
            print("初始化会话，访问IMDB首页...")
            init_response = self.session.get('https://www.imdb.com/', timeout=15)
            print(f"IMDB首页访问状态码: {init_response.status_code}")
            time.sleep(3)  # give the cookies a moment to settle
        except Exception as e:
            print(f"初始化会话失败: {str(e)}")
    
    def get_movie_ids(self):
        """
        从CSV文件中读取电影ID
        返回: 包含电影ID的字典，键为自定义ID，值为IMDb ID
        """
        movie_ids = {}
        try:
            with open(self.movie_csv_path, encoding='utf-8') as file:
                reader = csv.reader(file)
                header = next(reader, None)  # 跳过CSV标题行
                print(f"读取CSV文件: {self.movie_csv_path}")
                print(f"标题行: {header}")
                
                count = 0
                for line in reader:
                    if len(line) >= 2:
                        movie_ids[line[0]] = line[1].strip()  # 去除可能的空格
                        count += 1
                
                print(f"从CSV文件中读取了 {count} 个电影ID")
                if count == 0:
                    print("警告: 没有找到任何电影ID!")
                
                return movie_ids
        except Exception as e:
            print(f"读取CSV文件时出错: {str(e)}")
            return {}
    
    def fetch_movie_info(self, imdb_id):
        """Download the HTML of a movie's IMDb page.

        Args:
            imdb_id: IMDb title ID, with or without the 'tt' prefix.

        Returns:
            bytes: raw page content on success, otherwise None.
        """
        # Normalize the ID to the canonical 'tt...' form
        full_id = imdb_id if imdb_id.startswith('tt') else f'tt{imdb_id}'

        page_url = f'{self.url}{full_id}'
        print(f"获取电影信息: {page_url}")

        # Pretend we navigated here from the Top-250 chart page
        extra_headers = {
            'Referer': 'https://www.imdb.com/chart/top/'
        }

        try:
            # Throttle: random 1-3 second pause between requests
            pause = random.uniform(1, 3)
            print(f"随机等待 {pause:.2f} 秒...")
            time.sleep(pause)

            response = self.session.get(
                page_url,
                headers=extra_headers,
                timeout=15,
                allow_redirects=True
            )
            print(f"状态码: {response.status_code}")

            if response.status_code == 200:
                return response.content

            if response.status_code != 202:
                print(f"请求失败，状态码: {response.status_code}")
                return None

            # 202 Accepted: the server deferred the real response
            print("收到202状态码 (Accepted)，尝试处理中...")
            # Dump the body so the 202 response can be inspected later
            with open('response_202.html', 'wb') as dump:
                dump.write(response.content)

            # Follow an explicit Location header if one was supplied
            if 'Location' in response.headers:
                target = response.headers['Location']
                print(f"尝试跟随重定向到: {target}")
                time.sleep(2)
                follow_up = self.session.get(target, timeout=15)
                if follow_up.status_code == 200:
                    return follow_up.content

            # Last resort: re-issue the original request after a delay
            print("尝试再次请求...")
            time.sleep(5)
            second_try = self.session.get(page_url, timeout=15)
            if second_try.status_code == 200:
                return second_try.content

            print(f"无法处理202状态码，保存响应内容到response_202.html")
            return None
        except Exception as exc:
            print(f"请求电影页面时出错: {str(exc)}")
            return None
    
    def parse_movie_info(self, html):
        """Parse a movie page's HTML into the CSV row format.

        Args:
            html: raw HTML content of an IMDb title page.

        Returns:
            list: exactly 28 values, in the same column order as the header
            written by save_info() (color, director_name, ...,
            movie_facebook_likes). Fields IMDb does not expose (Facebook
            like counts, faces in poster) are empty strings. On any parsing
            error a list of 28 empty strings is returned.
        """
        try:
            soup = BeautifulSoup(html, 'html.parser')

            # Color type — crude heuristic: any 'Color' text on the page
            color = 'Color' if soup.find(string=re.compile('Color')) else 'Black and White'

            # Director name (current layout first, legacy layout as fallback)
            director_name = ''
            director_elem = soup.select_one('a[data-testid="title-pc-principal-credit"]')
            if director_elem:
                director_name = director_elem.get_text(strip=True)
            else:
                credit_summary = soup.find('div', class_='credit_summary_item')
                # Guard the <a> lookup: a missing anchor previously raised
                # AttributeError and wiped the entire row via the except below.
                if credit_summary and credit_summary.a:
                    director_name = credit_summary.a.get_text(strip=True)

            # Number of critic reviews.
            # NOTE(review): string= matches only when the text is the tag's
            # sole child; nested markup makes these find() lookups return
            # None — verify selectors against a live page.
            num_critic_for_reviews = ''
            critic_elem = soup.find('span', string=re.compile('Critic reviews'))
            if critic_elem:
                digits = re.findall(r'\d+', critic_elem.text)
                num_critic_for_reviews = digits[0] if digits else ''

            # Runtime
            duration = ''
            duration_elem = soup.select_one('li[data-testid="title-techspec_runtime"]')
            if duration_elem:
                duration = duration_elem.get_text(strip=True)

            # Cast list, reused for actors 1-3
            actor_elems = soup.select('a[data-testid="title-cast-item__actor"]')
            actor_1_name = actor_elems[0].get_text(strip=True) if len(actor_elems) > 0 else ''
            actor_2_name = actor_elems[1].get_text(strip=True) if len(actor_elems) > 1 else ''
            actor_3_name = actor_elems[2].get_text(strip=True) if len(actor_elems) > 2 else ''

            # Worldwide gross
            gross = ''
            gross_elem = soup.find('li', string=re.compile('Gross worldwide'))
            if gross_elem:
                gross = gross_elem.get_text(strip=True)

            # Genres (joined with '|' in the final row)
            genres = [g.get_text(strip=True) for g in soup.select('a[data-testid="genres"]')]

            # Title
            movie_title = ''
            title_h1 = soup.select_one('h1[data-testid="hero__pageTitle"]')
            if title_h1:
                movie_title = title_h1.get_text(strip=True)

            # Number of voting users (element right after the rating score)
            num_voted_users = ''
            votes_elem = soup.select_one('div[data-testid="hero-rating-bar__aggregate-rating__score"] + div')
            if votes_elem:
                num_voted_users = votes_elem.get_text(strip=True)

            # Plot keywords
            plot_keywords = ''
            keywords_elem = soup.find('div', {'data-testid': 'storyline-plot-keywords'})
            if keywords_elem:
                plot_keywords = keywords_elem.get_text(strip=True)

            # Canonical IMDb link (meta lookup hoisted so it runs only once)
            page_const = soup.find('meta', property='imdb:pageConst')
            movie_imdb_link = (self.url + page_const['content']) if page_const else ''

            # Number of user reviews
            num_user_for_reviews = ''
            reviews_elem = soup.find('span', string=re.compile('User reviews'))
            if reviews_elem:
                digits = re.findall(r'\d+', reviews_elem.text)
                num_user_for_reviews = digits[0] if digits else ''

            # Language
            language = ''
            lang_elem = soup.find('li', string=re.compile('Language'))
            if lang_elem:
                language = lang_elem.get_text(strip=True)

            # Country
            country = ''
            country_elem = soup.find('li', string=re.compile('Country'))
            if country_elem:
                country = country_elem.get_text(strip=True)

            # Content rating / certification
            content_rating = ''
            cert_elem = soup.find('li', string=re.compile('Certification'))
            if cert_elem:
                content_rating = cert_elem.get_text(strip=True)

            # Budget
            budget = ''
            budget_elem = soup.find('li', string=re.compile('Budget'))
            if budget_elem:
                budget = budget_elem.get_text(strip=True)

            # Release year / date
            title_year = ''
            year_elem = soup.select_one('span[data-testid="title-details-releasedate"]')
            if year_elem:
                title_year = year_elem.get_text(strip=True)

            # IMDb score
            imdb_score = ''
            score_elem = soup.select_one('span[data-testid="hero-rating-bar__aggregate-rating__score"]')
            if score_elem:
                imdb_score = score_elem.get_text(strip=True)

            # Aspect ratio
            aspect_ratio = ''
            ratio_elem = soup.find('li', string=re.compile('Aspect ratio'))
            if ratio_elem:
                aspect_ratio = ratio_elem.get_text(strip=True)

            # IMDb does not expose Facebook like counts
            movie_facebook_likes = '0'

            print(f"解析结果: 片名={movie_title}")
            # Bug fix: the row now has exactly 28 values in the same order as
            # the save_info() header. The old 22-value row was misaligned with
            # the 28-column header, with the 28-empty-string error path below,
            # and with the data[11] "title" index used when saving.
            return [
                color,                   # color
                director_name,           # director_name
                num_critic_for_reviews,  # num_critic_for_reviews
                duration,                # duration
                '',                      # director_facebook_likes (not on IMDb)
                '',                      # actor_3_facebook_likes (not on IMDb)
                actor_2_name,            # actor_2_name
                '',                      # actor_1_facebook_likes (not on IMDb)
                gross,                   # gross
                '|'.join(genres),        # genres
                actor_1_name,            # actor_1_name
                movie_title,             # movie_title
                num_voted_users,         # num_voted_users
                '',                      # cast_total_facebook_likes (not on IMDb)
                actor_3_name,            # actor_3_name
                '',                      # facenumber_in_poster (not computed)
                plot_keywords,           # plot_keywords
                movie_imdb_link,         # movie_imdb_link
                num_user_for_reviews,    # num_user_for_reviews
                language,                # language
                country,                 # country
                content_rating,          # content_rating
                budget,                  # budget
                title_year,              # title_year
                '',                      # actor_2_facebook_likes (not on IMDb)
                imdb_score,              # imdb_score
                aspect_ratio,            # aspect_ratio
                movie_facebook_likes,    # movie_facebook_likes
            ]

        except Exception as e:
            print(f"解析电影信息时出错: {str(e)}")
            return [''] * 28  # one empty value per CSV column
    
    def save_poster(self, movie_id, poster_url):
        """Download a poster image and store it in the poster directory.

        Args:
            movie_id: ID used as the output filename stem.
            poster_url: direct URL of the poster image; may be empty.

        Returns:
            bool: True if the image file was written, False otherwise.
        """
        # Nothing to download without a URL
        if not poster_url:
            print(f"警告: 电影 {movie_id} 没有海报URL")
            return False

        try:
            image = self.session.get(poster_url, timeout=10)
            if image.status_code != 200:
                print(f"下载海报失败，状态码: {image.status_code}")
                return False

            target = f'{self.poster_save_path}/{movie_id}.jpg'
            with open(target, 'wb') as out:
                out.write(image.content)
            print(f"保存海报: {movie_id}.jpg")
            return True
        except Exception as exc:
            print(f"保存海报时出错: {str(exc)}")
            return False
    
    def save_info(self, movie_id, data):
        """
        保存电影信息到CSV文件
        参数:
            movie_id: 电影ID
            data: 电影信息列表
        返回: 是否成功保存
        """
        try:
            # 检查文件是否存在，不存在则创建并写入标题行
            file_exists = os.path.isfile(self.info_save_path)
            
            with open(self.info_save_path, 'a', encoding='utf-8', newline='') as file:
                writer = csv.writer(file)
                if not file_exists:
                    writer.writerow([
                        'color,颜色类型',
                        'director_name,导演姓名',
                        'num_critic_for_reviews,评论家评论数',
                        'duration,电影时长',
                        'director_facebook_likes,导演Facebook点赞数',
                        'actor_3_facebook_likes,演员3的Facebook点赞数',
                        'actor_2_name,演员2姓名',
                        'actor_1_facebook_likes,演员1的Facebook点赞数',
                        'gross,全球票房',
                        'genres,电影类型',
                        'actor_1_name,演员1姓名',
                        'movie_title,电影标题',
                        'num_voted_users,投票用户数',
                        'cast_total_facebook_likes,演员总Facebook点赞数',
                        'actor_3_name,演员3姓名',
                        'facenumber_in_poster,海报中的人脸数',
                        'plot_keywords,剧情关键词',
                        'movie_imdb_link,IMDb链接',
                        'num_user_for_reviews,用户评论数',
                        'language,语言',
                        'country,国家',
                        'content_rating,内容分级',
                        'budget,预算',
                        'title_year,发行年份',
                        'actor_2_facebook_likes,演员2的Facebook点赞数',
                        'imdb_score,IMDb评分',
                        'aspect_ratio,画面比例',
                        'movie_facebook_likes,电影Facebook点赞数'
                    ])
                
                writer.writerow(data)
            print(f"保存电影信息: ID={movie_id}, 名称={data[11]}")  # 更新索引以匹配新的数据结构
            return True
        except Exception as e:
            print(f"保存电影信息时出错: {str(e)}")
            return False
    
    def run(self):
        """Main entry point: crawl every movie listed in the ID CSV.

        Fetches each movie page, parses it, saves the info row and — best
        effort — the poster, then prints summary statistics.
        """
        print("=== IMDb电影信息爬虫开始运行 ===")
        movie_ids = self.get_movie_ids()
        
        if not movie_ids:
            print("没有找到要爬取的电影ID，请检查CSV文件")
            return
        
        success_count = 0
        fail_count = 0
        
        # Process each movie in turn
        for movie_id, imdb_id in movie_ids.items():
            print(f"\n--- 处理电影: {movie_id} (IMDb: {imdb_id}) ---")
            html = self.fetch_movie_info(imdb_id)
            
            if html:
                data = self.parse_movie_info(html)
                # NOTE(review): the parsed row has no dedicated poster-URL
                # column (index 27 is movie_facebook_likes in the CSV
                # layout), so this rarely yields a usable URL — verify
                # where the poster URL is supposed to come from.
                poster_url = data[27] if len(data) > 27 else ''
                # Bug fix: the poster download is best effort and no longer
                # gates success. Previously a movie only counted as a
                # success if the poster also saved — which never happens —
                # so every movie was tallied as a failure and the
                # failure-scaled delay below grew without bound.
                self.save_poster(movie_id, poster_url)
                if self.save_info(movie_id, data):
                    success_count += 1
                else:
                    fail_count += 1
            else:
                print(f"无法获取电影 {movie_id} 的HTML内容")
                fail_count += 1
            
            # Back off harder after failures, but cap the delay so a long
            # run cannot degrade into multi-minute sleeps per movie
            delay = min(3 + (2 * fail_count), 30)
            print(f"等待 {delay} 秒后继续...")
            time.sleep(delay)
        
        # Final statistics
        print("\n=== IMDb电影信息爬虫运行完成 ===")
        print(f"总共处理: {len(movie_ids)} 部电影")
        print(f"成功: {success_count} 部")
        print(f"失败: {fail_count} 部")

if __name__ == '__main__':
    # Script entry point: build a crawler instance and start crawling
    crawler = IMDBCrawler()
    crawler.run()
