import time
import json
import random
import os
import re
import undetected_chromedriver as uc
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
import requests
import logging
import sys

# Logging setup: mirror everything to stdout and to a UTF-8 log file.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout),
        logging.FileHandler('maoyan_selenium.log', encoding='utf-8')
    ]
)
logger = logging.getLogger(__name__)

# Pool of realistic desktop user-agent strings; one is picked at random per
# browser session / image request to make traffic look less uniform.
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/117.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/119.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/17.0 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/116.0.0.0 Safari/537.36 Edg/116.0.1938.69'
]

class MaoyanTop100SeleniumSpider:
    """Scraper for the Maoyan Top-100 movie board using Selenium.

    Uses undetected-chromedriver to reduce bot detection, crawls the board
    in small batches with random delays and human-like scrolling, visits
    each movie's detail page, downloads poster images, and saves results
    as JSON and CSV.
    """

    def __init__(self):
        self.base_url = "https://www.maoyan.com/board/4"  # Top-100 board URL
        self.total_movies = 100
        self.movies = []        # accumulated movie dicts across all batches
        self.max_retries = 3    # max retries per list page

        # Make sure output folders exist before any crawling starts.
        self.create_directories()

    def create_directories(self):
        """Create the output folders (images, data, logs) if missing."""
        folders = ['maoyan_images', 'maoyan_data', 'maoyan_logs']
        for folder in folders:
            if not os.path.exists(folder):
                os.makedirs(folder)
                logger.info(f"创建文件夹: {folder}")

    def setup_browser(self):
        """Start and return an undetected Chrome driver.

        Returns:
            The started WebDriver instance.

        Raises:
            Exception: re-raised if the browser fails to start.
        """
        try:
            # undetected_chromedriver patches Chrome to bypass bot detection.
            options = uc.ChromeOptions()

            # Random user agent for this browser session.
            user_agent = random.choice(USER_AGENTS)
            options.add_argument(f'user-agent={user_agent}')

            # Common stability/compatibility flags.
            options.add_argument("--disable-gpu")
            options.add_argument("--no-sandbox")
            options.add_argument("--disable-dev-shm-usage")
            options.add_argument("--disable-extensions")

            # Headless mode is easier to fingerprint, so it stays disabled.
            # options.add_argument('--headless')

            driver = uc.Chrome(options=options)
            driver.maximize_window()
            driver.set_page_load_timeout(30)  # page-load timeout in seconds

            logger.info(f"浏览器启动成功，使用UA: {user_agent}")
            return driver
        except Exception as e:
            logger.critical(f"浏览器启动失败: {e}")
            raise

    def save_to_json(self, data):
        """Dump the movie list to maoyan_data/maoyan_top100_selenium.json."""
        if not data:
            logger.warning("没有数据可保存为JSON")
            return

        file_path = os.path.join('maoyan_data', 'maoyan_top100_selenium.json')
        with open(file_path, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=4)
        logger.info(f"数据已保存: {file_path}")

    def save_to_csv(self, data):
        """Dump the movie list to maoyan_data/maoyan_top100_selenium.csv.

        ASCII commas inside field values are replaced with full-width
        commas so the hand-written CSV stays well-formed.
        """
        if not data:
            logger.warning("没有数据可保存为CSV")
            return

        file_path = os.path.join('maoyan_data', 'maoyan_top100_selenium.csv')
        with open(file_path, 'w', encoding='utf-8') as f:
            # Header row.
            f.write('排名,电影名称,评分,主演,上映时间,详情页链接,海报图片\n')
            # Data rows.
            for movie in data:
                # Neutralize commas so they don't break the CSV columns.
                title = movie['title'].replace(',', '，') if 'title' in movie else "未知"
                stars = movie['stars'].replace(',', '，') if 'stars' in movie else "无主演信息"
                release_time = movie['release_time'].replace(',', '，') if 'release_time' in movie else "未知"
                poster = movie.get('poster', '无海报')

                row = f"{movie.get('rank', 0)},{title},{movie.get('score', '0')},{stars},{release_time},{movie.get('detail_url', '')},{poster}\n"
                f.write(row)
        logger.info(f"CSV数据已保存: {file_path}")

    def download_image(self, url, filename):
        """Download a poster image into maoyan_images/, with up to 3 attempts.

        Args:
            url: absolute image URL (may be empty).
            filename: target file name inside the images folder.

        Returns:
            True on success, False otherwise.
        """
        if not url:
            # FIX: log message previously contained the literal "(unknown)"
            # instead of interpolating the file name.
            logger.warning(f"图片URL为空，无法下载: {filename}")
            return False

        try:
            # Random UA + Referer so the request looks like a page asset load.
            headers = {
                "User-Agent": random.choice(USER_AGENTS),
                "Referer": "https://www.maoyan.com/"
            }

            # Retry up to 3 times with a small random backoff.
            for attempt in range(3):
                try:
                    response = requests.get(url, headers=headers, timeout=15)
                    if response.status_code == 200:
                        file_path = os.path.join('maoyan_images', filename)
                        with open(file_path, 'wb') as f:
                            f.write(response.content)
                        # FIX: interpolate the saved path instead of "(unknown)".
                        logger.info(f"图片已保存: {file_path}")
                        return True
                    else:
                        logger.warning(f"下载图片失败，状态码: {response.status_code}，尝试: {attempt+1}/3")
                        time.sleep(random.uniform(1, 3))
                except Exception as e:
                    logger.warning(f"下载图片出错: {e}，尝试: {attempt+1}/3")
                    time.sleep(random.uniform(1, 3))

            # FIX: interpolate the failing URL instead of "(unknown)".
            logger.error(f"多次尝试下载图片失败: {url}")
            return False
        except Exception as e:
            logger.error(f"下载图片异常: {e}")
            return False

    def parse_movie_list(self, page_source, start_rank):
        """Parse one board page into a list of movie dicts.

        Args:
            page_source: raw HTML of the board page.
            start_rank: rank of the first movie on this page.

        Returns:
            List of dicts with rank/title/score/stars/release_time/detail_url.
        """
        movies_list = []
        soup = BeautifulSoup(page_source, 'html.parser')

        # Keep a copy of the raw HTML for post-mortem debugging.
        with open(f'maoyan_logs/selenium_page_{start_rank}.html', 'w', encoding='utf-8') as f:
            f.write(page_source)

        # Primary selector for movie entries.
        movies = soup.select('.board-item')

        if not movies:
            logger.error("未找到电影列表元素，可能页面结构已变化")
            # Fall back to alternative selectors in case the markup changed.
            movies = soup.select('.movie-item-title') or soup.select('.name')
            if movies:
                logger.info("使用备用选择器找到了电影元素")
            else:
                return movies_list

        logger.info(f"找到 {len(movies)} 个电影条目")

        for i, movie in enumerate(movies, 1):
            try:
                rank = start_rank + i - 1

                # Title and detail-page link.
                title_tag = movie.select_one('.name a') or movie.select_one('a')
                if title_tag:
                    title = title_tag.get_text(strip=True)
                    detail_url = f"https://www.maoyan.com{title_tag.get('href', '')}"
                else:
                    title = f"未知电影{rank}"
                    detail_url = ""

                # Score.
                score_tag = movie.select_one('.score') or movie.select_one('.integer')
                if score_tag:
                    score = score_tag.get_text(strip=True)
                else:
                    score = "无评分"

                # Starring actors.
                star_tag = movie.select_one('.star')
                stars = star_tag.get_text(strip=True) if star_tag else "无主演信息"

                # Release date.
                release_tag = movie.select_one('.releasetime') or movie.select_one('.date')
                release_time = release_tag.get_text(strip=True) if release_tag else "未知上映时间"

                # Assemble the record for this movie.
                movie_info = {
                    'rank': rank,
                    'title': title,
                    'score': score,
                    'stars': stars,
                    'release_time': release_time,
                    'detail_url': detail_url
                }

                movies_list.append(movie_info)
                logger.info(f"成功解析电影: [{rank}] {title}")

            except Exception as e:
                logger.error(f"处理第{i}个电影时出错: {e}")
                continue

        return movies_list

    def get_movie_detail(self, driver, url, movie_data):
        """Visit a movie detail page and enrich movie_data in place.

        Adds category, area/duration, summary, and downloads the poster
        image when one can be located. Always returns movie_data (possibly
        unchanged on failure).
        """
        if not url:
            logger.warning(f"电影 {movie_data.get('title', 'Unknown')} 没有详情链接")
            return movie_data

        try:
            logger.info(f"获取电影详情: {url}")

            # Random pause to mimic human browsing.
            time.sleep(random.uniform(1.5, 3.5))

            # Load the detail page, retrying on timeout/errors.
            for attempt in range(3):
                try:
                    driver.get(url)

                    # Wait for any of the known detail-page containers.
                    try:
                        WebDriverWait(driver, 10).until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, '.movie-brief-container, .dra, .movie-title'))
                        )
                        logger.info("详情页加载完成")
                        break  # loaded OK; stop retrying
                    # FIX: was a bare `except:` that also swallowed
                    # KeyboardInterrupt/SystemExit.
                    except Exception:
                        if attempt < 2:  # no warning spam on the final attempt
                            logger.warning(f"详情页等待超时，尝试: {attempt+1}/3")
                            time.sleep(3)  # back off before retrying
                        else:
                            logger.warning("详情页等待超时，尝试继续解析")

                except Exception as e:
                    if attempt < 2:
                        logger.warning(f"访问详情页出错: {e}，尝试: {attempt+1}/3")
                        time.sleep(3)
                    else:
                        logger.error(f"多次访问详情页失败: {e}")
                        return movie_data

            # Grab the rendered HTML.
            page_source = driver.page_source

            # Save it under the movie's URL id for debugging.
            movie_id = url.split('/')[-1]
            with open(f'maoyan_logs/selenium_detail_{movie_id}.html', 'w', encoding='utf-8') as f:
                f.write(page_source)

            soup = BeautifulSoup(page_source, 'html.parser')

            # Extra metadata from the brief-info block.
            info_div = soup.select_one('.movie-brief-container')
            if info_div:
                # Genre/category.
                cat_tag = info_div.select_one('li:nth-child(1)')
                if cat_tag:
                    movie_data['category'] = cat_tag.get_text(strip=True)

                # Region / runtime.
                area_tag = info_div.select_one('li:nth-child(2)')
                if area_tag:
                    movie_data['area_duration'] = area_tag.get_text(strip=True)

            # Plot summary.
            summary_tag = soup.select_one('.dra')
            if summary_tag:
                movie_data['summary'] = summary_tag.get_text(strip=True)

            # Poster image: try several selectors in decreasing specificity.
            img_url = None

            # 1. Known poster containers.
            cover_img = soup.select_one('.avatar-shadow img') or \
                        soup.select_one('.movie-poster img') or \
                        soup.select_one('.poster-pic img') or \
                        soup.select_one('.movie-img img')

            if cover_img:
                img_url = cover_img.get('src') or cover_img.get('data-src')

            # 2. Heuristic scan over all <img> tags as a fallback.
            if not img_url:
                all_imgs = soup.select('img')
                for img in all_imgs:
                    potential_url = img.get('src') or img.get('data-src')
                    # URLs containing these keywords are likely the poster.
                    if potential_url and ('poster' in potential_url or 'movie' in potential_url or '.jpg' in potential_url):
                        img_url = potential_url
                        break

            # 3. Download the image if a URL was found.
            if img_url:
                # Protocol-relative URLs need an explicit scheme.
                if img_url.startswith('//'):
                    img_url = 'https:' + img_url

                # Strip characters that are illegal in file names.
                safe_title = re.sub(r'[\\/:*?"<>|]', '', movie_data['title'])
                filename = f"{movie_data['rank']}_{safe_title}.jpg"

                if self.download_image(img_url, filename):
                    movie_data['poster'] = filename
                    movie_data['img_url'] = img_url
            else:
                logger.warning(f"未找到电影 {movie_data['title']} 的海报图片")

            logger.info(f"电影详情获取成功: {movie_data['title']}")
            return movie_data

        except Exception as e:
            logger.error(f"获取电影详情失败: {e}")
            return movie_data

    def crawl_batch(self, driver, page_range):
        """Crawl a batch of board pages (10 movies per page).

        Args:
            driver: active WebDriver.
            page_range: iterable of 0-based page indices.

        Returns:
            List of movie dicts collected across the batch.
        """
        movies_batch = []
        for page in page_range:
            offset = page * 10
            start_rank = offset + 1
            end_rank = offset + 10

            logger.info(f"正在爬取电影排名 {start_rank}-{end_rank}...")

            # Page 0 is the bare board URL; later pages use ?offset=.
            if offset == 0:
                url = self.base_url
            else:
                url = f"{self.base_url}?offset={offset}"

            retry_count = 0
            success = False

            # Retry loop per page.
            while retry_count < self.max_retries and not success:
                try:
                    driver.get(url)

                    # Wait for the list to render (fixed settle + explicit wait).
                    try:
                        time.sleep(3)  # give the page a moment to settle
                        WebDriverWait(driver, 15).until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, '.board-item, .movie-item-title'))
                        )
                        logger.info("页面加载完成")
                        # FIX: do NOT mark success here. Marking success on the
                        # wait alone meant a later parse failure exited the
                        # while-loop without ever retrying; success is now set
                        # only once movies are actually parsed.
                    except Exception as e:
                        logger.warning(f"等待页面加载超时，尝试继续解析: {e}")

                    # Random scrolling to mimic a real user.
                    for _ in range(random.randint(2, 4)):
                        scroll_height = random.randint(300, 800)
                        driver.execute_script(f"window.scrollBy(0, {scroll_height});")
                        time.sleep(random.uniform(0.5, 1.5))

                    # Parse the rendered page.
                    page_source = driver.page_source
                    movies_on_page = self.parse_movie_list(page_source, start_rank)

                    # Only count the page as done when data was extracted.
                    if movies_on_page:
                        success = True
                        # Enrich each movie with its detail page.
                        for movie in movies_on_page:
                            if movie.get('detail_url'):
                                movie = self.get_movie_detail(driver, movie['detail_url'], movie)

                        movies_batch.extend(movies_on_page)
                        logger.info(f"排名 {start_rank}-{end_rank} 爬取完成，获取到 {len(movies_on_page)} 部电影")
                        break  # data obtained; stop retrying this page
                    else:
                        logger.warning(f"页面解析失败，未获取到电影数据，重试 ({retry_count+1}/{self.max_retries})")
                        retry_count += 1
                        time.sleep(5)  # back off before retrying

                except Exception as e:
                    logger.error(f"爬取排名 {start_rank}-{end_rank} 出错: {e}")
                    retry_count += 1
                    if retry_count < self.max_retries:
                        wait_time = random.uniform(5, 10)
                        logger.info(f"等待{wait_time:.1f}秒后重试...")
                        time.sleep(wait_time)

            # Random pause between pages to avoid hammering the site.
            if page < max(page_range):  # skip after the last page
                wait_time = random.uniform(3, 7)
                logger.info(f"等待{wait_time:.1f}秒后继续...")
                time.sleep(wait_time)

        return movies_batch

    def run(self):
        """Entry point: start the browser, crawl all batches, save results."""
        logger.info("开始运行猫眼TOP100电影爬虫 (Selenium版本)")

        driver = None
        try:
            driver = self.setup_browser()

            # Hit the home page first to clear possible checks/popups.
            logger.info("访问首页...")
            driver.get("https://www.maoyan.com/")
            time.sleep(random.uniform(3, 5))

            # Page batches (page index ranges) crawled with long pauses
            # in between to reduce the risk of being blocked.
            batch_sizes = [
                range(0, 2),    # movies 1-20
                range(2, 4),    # movies 21-40
                range(4, 7),    # movies 41-70
                range(7, 10)    # movies 71-100
            ]

            # Try to fetch page 1 directly first.
            logger.info("尝试直接访问榜单页面...")
            try:
                driver.get(self.base_url)
                time.sleep(5)  # give the page plenty of time to load

                page_source = driver.page_source
                if "猫眼TOP100" in page_source:
                    logger.info("成功加载榜单页面")

                    # Random scrolling/interaction to mimic a human.
                    for _ in range(3):
                        scroll_y = random.randint(300, 1000)
                        driver.execute_script(f"window.scrollTo(0, {scroll_y});")
                        time.sleep(random.uniform(0.5, 2))

                    # Parse the board page.
                    movies_on_page = self.parse_movie_list(page_source, 1)
                    if movies_on_page:
                        self.movies.extend(movies_on_page)
                        logger.info(f"成功从榜单页面获取了 {len(movies_on_page)} 部电影")

                        # Enrich each movie with its detail page (dicts are
                        # mutated in place, so self.movies sees the updates).
                        for movie in movies_on_page:
                            if movie.get('detail_url'):
                                movie = self.get_movie_detail(driver, movie['detail_url'], movie)

                        # Save progress so far.
                        self.save_to_json(self.movies)

                        # Page 0 done: first batch starts from page 1.
                        if len(batch_sizes) > 0:
                            batch_sizes[0] = range(1, 2)
                    else:
                        logger.warning("榜单页面解析失败，未能获取电影数据")
                else:
                    logger.warning("未能正确加载榜单页面")
            except Exception as e:
                logger.error(f"访问榜单页面出错: {e}")

            # Crawl the remaining batches.
            batch_count = 1
            for batch in batch_sizes:
                if not batch:  # skip empty batches
                    continue

                logger.info(f"开始爬取第{batch_count}批数据 (批次范围: {min(batch)*10+1}-{max(batch)*10+10})...")
                movies_batch = self.crawl_batch(driver, batch)

                if movies_batch:
                    self.movies.extend(movies_batch)
                    logger.info(f"第{batch_count}批数据爬取完成，当前已获取{len(self.movies)}部电影")

                    # Checkpoint after every batch in case of later failure.
                    self.save_to_json(self.movies)

                # Long pause between batches to lower blocking risk.
                if batch_count < len(batch_sizes):
                    wait_time = random.uniform(10, 20)
                    logger.info(f"批次间等待{wait_time:.1f}秒...")
                    time.sleep(wait_time)

                batch_count += 1

            # Final save.
            self.save_to_json(self.movies)
            self.save_to_csv(self.movies)
            logger.info(f"爬取完成，共获取{len(self.movies)}部电影")

            # Completeness report: which of ranks 1..100 are missing.
            # (set lookup instead of the original O(n^2) list scan)
            ranks = {movie.get('rank') for movie in self.movies}
            missing_ranks = [i for i in range(1, 101) if i not in ranks]
            if missing_ranks:
                logger.warning(f"有{len(missing_ranks)}个排名未获取到: {missing_ranks}")
            else:
                logger.info("所有100部电影数据均已完整获取")

        except Exception as e:
            logger.critical(f"爬虫运行出错: {e}", exc_info=True)
        finally:
            # Persist whatever was collected, even on failure.
            if self.movies:
                self.save_to_json(self.movies)
                self.save_to_csv(self.movies)

            # Close the browser; best-effort, ignore shutdown errors.
            try:
                if driver:
                    driver.quit()
                    logger.info("浏览器已关闭")
            except Exception:
                pass

if __name__ == "__main__":
    # Build and run the spider in one expression.
    MaoyanTop100SeleniumSpider().run()