import requests
from bs4 import BeautifulSoup
import os
import time
import json
import random
import re
from concurrent.futures import ThreadPoolExecutor

class DoubanTop100AdvancedSpider:
    """Scraper for the top 100 entries of Douban's Top 250 movie chart.

    For each movie it collects the list-page fields (rank, title, rating,
    rating count, intro, poster URL), enriches them with detail-page fields
    (directors, screenwriters, cast, genres, countries, languages, release
    dates, runtime, IMDb id, plot summary, year), downloads the poster into
    ./images, and finally exports everything to ./data as JSON and CSV.
    """

    def __init__(self):
        self.base_url = "https://movie.douban.com/top250"
        # Browser-like headers to lower the chance of being blocked.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
            "Referer": "https://movie.douban.com/",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
            "Connection": "keep-alive"
        }
        # Folders for downloaded posters and exported data.
        os.makedirs('images', exist_ok=True)
        os.makedirs('data', exist_ok=True)

        self.max_retries = 3    # retry attempts per failed request
        self.retry_delay = 5    # base back-off delay in seconds
        self.page_size = 25     # Douban shows 25 movies per page
        self.total_movies = 100  # total number of movies to scrape

    def get_page(self, url, retries=0):
        """Fetch *url* and return its HTML text, or None when retries run out.

        403/429 responses are retried with a linearly growing back-off;
        network-level errors are retried with a fixed delay. Both give up
        after ``self.max_retries`` attempts.
        """
        try:
            response = requests.get(url, headers=self.headers, timeout=10)
        except requests.exceptions.RequestException as e:
            if retries < self.max_retries:
                print(f"请求异常: {e}，等待后重试...")
                time.sleep(self.retry_delay)
                return self.get_page(url, retries + 1)
            print(f"达到最大重试次数，请求失败: {e}")
            return None

        if response.status_code == 200:
            return response.text
        if response.status_code in (403, 429):
            # Rate-limited / refused: back off longer on each retry.
            if retries < self.max_retries:
                print(f"请求被拒绝，状态码: {response.status_code}，等待后重试...")
                time.sleep(self.retry_delay * (retries + 1))
                return self.get_page(url, retries + 1)
            print(f"达到最大重试次数，请求失败")
            return None
        print(f"请求失败，状态码: {response.status_code}")
        return None

    def parse_movie_list_page(self, html, start_rank):
        """Parse one Top-250 list page into a list of movie-data dicts.

        *start_rank* is the chart rank of the first movie on the page.
        As a side effect, each movie's poster is downloaded and a short
        random pause is inserted between movies to throttle requests.
        """
        soup = BeautifulSoup(html, 'html.parser')
        movie_list = soup.select('ol.grid_view li')

        movies_data = []

        for i, movie in enumerate(movie_list):
            rank = start_rank + i

            # Detail-page link, title, rating straight from the list entry.
            detail_url = movie.select_one('.hd a')['href']
            title = movie.select_one('.title').text
            rating = movie.select_one('.rating_num').text

            # Rating count, e.g. "1234567人评价" -> "1234567".
            rating_count_element = movie.select_one('.star span:last-child')
            if rating_count_element:
                count_match = re.search(r'\d+', rating_count_element.text)
                rating_count = count_match.group() if count_match else '0'
            else:
                rating_count = '0'

            intro = movie.select_one('.bd p').text.strip() if movie.select_one('.bd p') else ''
            img_url = movie.select_one('.pic img')['src']

            print(f"正在处理 [{rank}/100] {title}...")

            # Enrich with detail-page fields. The list-page intro is passed
            # along so the detail parser can fall back to it when no plot
            # summary can be found (previously the fallback read a key that
            # was never set and silently produced an empty string).
            detail_info = self.get_movie_detail(detail_url, intro=intro)

            movie_data = {
                'rank': rank,
                'title': title,
                'rating': rating,
                'rating_count': rating_count,
                'intro': intro,
                'img_url': img_url,
                'detail_url': detail_url
            }

            if detail_info:
                movie_data.update(detail_info)

            movies_data.append(movie_data)

            # Download the poster image.
            self.download_image(img_url, f"{rank}_{title}")

            # Random 1-3s pause so requests don't look machine-gunned.
            time.sleep(random.uniform(1, 3))

        return movies_data

    def get_movie_detail(self, url, intro=''):
        """Scrape a movie's detail page and return a dict of extra fields.

        *intro* is the short list-page intro, used as a last-resort plot
        summary. Returns ``{}`` when the page cannot be fetched or parsed.
        """
        print(f"正在获取电影详情: {url}")
        html = self.get_page(url)
        if not html:
            return {}

        soup = BeautifulSoup(html, 'html.parser')
        detail_info = {}

        try:
            # Directors.
            directors = soup.select('#info span.attrs a[rel="v:directedBy"]')
            detail_info['directors'] = [d.text for d in directors]

            # Screenwriters.
            screenplay_section = soup.find('span', string='编剧')
            if screenplay_section:
                screenwriters = screenplay_section.parent.find_all('a')
                detail_info['screenwriters'] = [s.text for s in screenwriters]

            # Cast.
            # 1. The main cast is usually in the initially loaded HTML.
            actors = []
            actor_section = soup.find('span', string='主演')
            if actor_section:
                main_actors = actor_section.parent.find_all('a')
                # bs4 returns multi-valued attributes like rel as lists.
                actors = [a.text for a in main_actors if a.get('rel') == ["v:starring"]]

            # 2. Try the "/celebrities" page for the full cast list.
            all_celebrities_link = url.rstrip('/') + '/celebrities'
            try:
                celebrities_html = self.get_page(all_celebrities_link)
                if celebrities_html:
                    celebrities_soup = BeautifulSoup(celebrities_html, 'html.parser')
                    # Keep only celebrities whose role text contains "饰"
                    # (i.e. actors, not directors/writers). Filtering in
                    # Python replaces the non-standard ':contains()' CSS
                    # pseudo-class, which current soupsieve versions reject.
                    full_cast = []
                    for celebrity in celebrities_soup.select('.celebrities-list .celebrity'):
                        role = celebrity.select_one('.role')
                        name_link = celebrity.select_one('.name a')
                        if role and name_link and '饰' in role.get_text():
                            full_cast.append(name_link.text.strip())
                    if full_cast:
                        actors = full_cast
            except Exception as e:
                print(f"获取完整演员表失败: {e}")

            detail_info['actors'] = actors[:20] if actors else []  # cap at 20 actors

            # Genres.
            genres = soup.select('#info span[property="v:genre"]')
            detail_info['genres'] = [g.text for g in genres]

            # Countries/regions.
            country_section = soup.find('span', string='制片国家/地区:')
            if country_section:
                country_text = country_section.next_sibling.strip()
                detail_info['countries'] = [c.strip() for c in country_text.split('/')]

            # Languages.
            language_section = soup.find('span', string='语言:')
            if language_section:
                language_text = language_section.next_sibling.strip()
                detail_info['languages'] = [l.strip() for l in language_text.split('/')]

            # Release dates.
            release_dates = soup.select('#info span[property="v:initialReleaseDate"]')
            detail_info['release_dates'] = [d.text for d in release_dates]

            # Runtime.
            runtime = soup.select_one('#info span[property="v:runtime"]')
            if runtime:
                detail_info['runtime'] = runtime.text

            # IMDb id (plain text sibling of the "IMDb:" label).
            imdb_section = soup.find('span', string='IMDb:')
            if imdb_section:
                detail_info['imdb_id'] = imdb_section.next_sibling.strip()

            # -------------------- Plot summary --------------------
            # 1. Prefer the fully expanded summary (span.all holds the text
            #    hidden behind the "expand" toggle).
            plot_summary = ""

            hidden_summary = soup.select_one('div.related-info div.indent span.all')
            if hidden_summary:
                plot_summary = hidden_summary.get_text(strip=True).replace('\n\n', '\n').strip()

            # Otherwise take the default (possibly truncated) summary.
            if not plot_summary:
                default_summary = (soup.select_one('div.related-info div[property="v:summary"]')
                                   or soup.select_one('div.related-info span[property="v:summary"]'))
                if default_summary:
                    plot_summary = default_summary.get_text(strip=True).replace('\n\n', '\n').strip()

            # Otherwise take whatever text the summary container holds.
            if not plot_summary:
                any_summary = soup.select_one('div.related-info div.indent')
                if any_summary:
                    plot_summary = any_summary.get_text(strip=True).replace('\n\n', '\n').strip()

            # 2. If the main page had no usable summary, try the dedicated
            #    plots page.
            if not plot_summary or len(plot_summary) < 50:
                try:
                    plot_url = url.rstrip('/') + '/plots/'
                    plot_html = self.get_page(plot_url)
                    if plot_html:
                        plot_soup = BeautifulSoup(plot_html, 'html.parser')
                        plot_content = plot_soup.select_one('#content .article')
                        if plot_content:
                            # Strip navigation/heading chrome, keep the text.
                            for nav in plot_content.select('.c-aside-body, h1, .tabs, .c-aside'):
                                nav.decompose()
                            plot_summary = plot_content.get_text(strip=True).replace('\n\n', '\n').strip()
                except Exception as e:
                    print(f"获取剧情简介页面失败: {e}")

            # Fall back to the list-page intro passed in by the caller.
            if not plot_summary:
                plot_summary = intro

            # Normalize whitespace and drop common heading prefixes.
            if plot_summary:
                plot_summary = re.sub(r'\s+', ' ', plot_summary).strip()
                plot_summary = re.sub(r'^(剧情简介|内容简介|故事梗概)[:：]?\s*', '', plot_summary)

            detail_info['plot_summary'] = plot_summary

            # High-resolution poster link, when present.
            hd_img = soup.select_one('#mainpic a img')
            if hd_img and hd_img.get('src'):
                detail_info['hd_img_url'] = hd_img['src']

            # Release year from the title, e.g. "(1994)".
            year_span = soup.select_one('h1 span.year')
            if year_span:
                year_match = re.search(r'\((\d{4})\)', year_span.text)
                if year_match:
                    detail_info['year'] = year_match.group(1)

            return detail_info

        except Exception as e:
            print(f"解析电影详情页异常: {e}")
            return {}

    @staticmethod
    def _sanitize_filename(name):
        """Replace characters that are illegal in filenames with '_'."""
        return re.sub(r'[\\/*?:"<>|]', '_', name)

    def download_image(self, url, filename):
        """Download *url* into images/<filename>.jpg; return True on success.

        Bug fix: the sanitized filename is now actually used for the file
        path — previously every poster was written to the same fixed name,
        each download overwriting the previous one.
        """
        try:
            response = requests.get(url, headers=self.headers, timeout=10)
            if response.status_code == 200:
                safe_name = self._sanitize_filename(filename)
                file_path = os.path.join('images', f"{safe_name}.jpg")
                with open(file_path, 'wb') as f:
                    f.write(response.content)
                print(f"图片已保存: {file_path}")
                return True
            print(f"下载图片失败，状态码: {response.status_code}")
            return False
        except Exception as e:
            print(f"下载图片异常: {e}")
            return False

    def save_to_json(self, data):
        """Write *data* to data/douban_top100_detailed.json as UTF-8 JSON."""
        file_path = os.path.join('data', 'douban_top100_detailed.json')
        # Plain UTF-8: the previous 'utf-8-sig' wrote a BOM, which is not
        # valid JSON and makes json.loads() fail on the round trip.
        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=4)
        print(f"数据已保存: {file_path}")

    def save_to_csv(self, data):
        """Write *data* to data/douban_top100.csv (BOM kept for Excel)."""
        import csv
        file_path = os.path.join('data', 'douban_top100.csv')

        # Union of all keys, since detail scraping may add fields unevenly;
        # DictWriter fills missing keys with ''.
        fields = set()
        for movie in data:
            fields.update(movie.keys())
        fields = sorted(fields)

        with open(file_path, 'w', encoding='utf-8-sig', newline='') as f:
            writer = csv.DictWriter(f, fieldnames=fields)
            writer.writeheader()
            writer.writerows(data)
        print(f"CSV数据已保存: {file_path}")

    def run(self):
        """Crawl every page of the chart, then persist the collected data."""
        print("开始爬取豆瓣电影TOP100详细信息...")

        all_movies = []
        # Derive the page count from the configuration instead of
        # hard-coding 4, so changing total_movies/page_size just works.
        total_pages = self.total_movies // self.page_size

        for page in range(total_pages):
            start = page * self.page_size
            url = f"{self.base_url}?start={start}"

            print(f"正在爬取第{page + 1}/{total_pages}页，电影排名 {start + 1}-{start + self.page_size}...")

            html = self.get_page(url)
            if not html:
                print(f"获取第{page + 1}页失败，跳过")
                continue

            movies_on_page = self.parse_movie_list_page(html, start + 1)
            all_movies.extend(movies_on_page)

            print(f"第{page + 1}页爬取完成，当前已获取{len(all_movies)}部电影")

            # Longer random pause between pages to avoid an IP ban;
            # no pause needed after the final page.
            if page < total_pages - 1:
                sleep_time = random.uniform(3, 8)
                print(f"等待{sleep_time:.1f}秒后继续爬取下一页...")
                time.sleep(sleep_time)

        if all_movies:
            self.save_to_json(all_movies)
            self.save_to_csv(all_movies)
            print(f"成功爬取 {len(all_movies)} 部电影详细信息")
        else:
            print("爬取失败，未获取到电影数据")

if __name__ == '__main__':
    # Script entry point: build the spider and start the full crawl.
    DoubanTop100AdvancedSpider().run()