#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
漫画爬取脚本
爬取 https://om0813.zw7gmc49.work/comics/classifys 页面的漫画信息
处理下拉加载功能
"""

import time
import json
import os
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.common.exceptions import TimeoutException, NoSuchElementException
# import undetected_chromedriver as uc  # 注释掉，因为与Python 3.12不兼容
from datetime import datetime
from 登录 import LoginManager  # 导入登录模块

class ComicScraper:
    """Selenium-based scraper for the comics "classifys" listing page.

    Logs in through ``LoginManager``, scrolls the page to trigger its
    infinite-scroll loading, extracts per-comic metadata (title, url,
    image, description, tags) and saves the results as JSON under the
    ``downloads/`` directory.
    """

    def __init__(self, headless=True, username=None, password=None):
        """
        Initialize the scraper.

        Args:
            headless (bool): whether to run the browser in headless mode.
            username (str): login user name (optional).
            password (str): login password (optional).
        """
        self.base_url = "https://om0813.zw7gmc49.work/comics/classifys"
        self.driver = None          # Selenium WebDriver, set after a successful login
        self.headless = headless
        self.comics_data = []       # accumulated comic-info dicts
        self.username = username
        self.password = password
        self.login_manager = None   # LoginManager instance, created in setup_driver()

    def setup_driver(self):
        """Create the login manager, log in, and take over its WebDriver.

        Returns:
            bool: True when a logged-in driver is available, False otherwise.
        """
        try:
            print("开始设置浏览器驱动和登录...")

            self.login_manager = LoginManager(headless=self.headless)

            if self.username and self.password:
                # Explicit credentials were supplied by the caller.
                if self.login_manager.login(username=self.username, password=self.password):
                    print("登录成功！")
                    self.driver = self.login_manager.get_driver()
                    return True
                print("登录失败！")
                return False

            # No credentials: let LoginManager try whatever defaults it has.
            print("未提供用户名和密码，尝试自动登录...")
            if self.login_manager.login():
                print("自动登录成功！")
                self.driver = self.login_manager.get_driver()
                return True
            print("自动登录失败！")
            return False

        except Exception as e:
            print(f"设置浏览器驱动失败: {e}")
            return False

    def wait_for_page_load(self, timeout=30, ad_wait=20):
        """Wait for the page body to exist, then pause for ads/JS to settle.

        Args:
            timeout (int): seconds to wait for the <body> element.
            ad_wait (int): seconds to pause so overlay ads can disappear.
                (Bug fix: the previous log message claimed a 10-second wait
                while the code slept 20 seconds; the wait is now a parameter
                and is reported accurately.)

        Returns:
            bool: True when the page loaded within the timeout.
        """
        try:
            WebDriverWait(self.driver, timeout).until(
                EC.presence_of_element_located((By.TAG_NAME, "body"))
            )
            print(f"页面基础内容已加载，等待{ad_wait}秒让广告消失...")
            time.sleep(ad_wait)
            print("广告等待完成，继续执行...")

            # Extra settle time so client-side JavaScript can finish rendering.
            print("等待页面内容完全加载...")
            time.sleep(5)

            return True
        except TimeoutException:
            print("页面加载超时")
            return False

    def scroll_to_load_more(self, max_scrolls=10, scroll_pause=2):
        """
        Scroll to the bottom repeatedly to trigger lazy/infinite loading.

        Stops early when the document height stops growing (no new content).

        Args:
            max_scrolls (int): maximum number of scroll attempts.
            scroll_pause (int): seconds to wait after each scroll.
        """
        print("开始滚动加载更多内容...")

        last_height = self.driver.execute_script("return document.body.scrollHeight")
        scroll_count = 0

        while scroll_count < max_scrolls:
            # Jump to the bottom of the page and give content time to load.
            self.driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(scroll_pause)

            # Height unchanged means nothing new was appended.
            new_height = self.driver.execute_script("return document.body.scrollHeight")
            if new_height == last_height:
                print(f"滚动 {scroll_count + 1} 次后没有新内容，停止滚动")
                break

            last_height = new_height
            scroll_count += 1
            print(f"完成第 {scroll_count} 次滚动，页面高度: {new_height}")

            # If a loading spinner/text is visible, give it extra time.
            try:
                loading_elements = self.driver.find_elements(By.XPATH, "//*[contains(text(), '加载') or contains(text(), 'Loading') or contains(@class, 'loading')]")
                if loading_elements:
                    print("检测到加载指示器，等待加载完成...")
                    time.sleep(3)
            except Exception:
                pass

        print(f"滚动加载完成，共滚动 {scroll_count} 次")

    def extract_comic_info(self):
        """Locate comic elements with a list of candidate XPaths and extract
        their details into ``self.comics_data``."""
        print("开始提取漫画信息...")

        try:
            # Give the page a moment in case content is still rendering.
            time.sleep(3)

            # Candidate selectors, tried in order of specificity.
            # (A duplicate of the first selector at the end of the original
            # list was removed — it was unreachable dead data.)
            selectors = [
                "//div[contains(@class, 'comic') or contains(@class, 'manga') or contains(@class, 'item')]",
                "//div[contains(@class, 'card') or contains(@class, 'grid')]",
                "//a[contains(@href, '/comic/') or contains(@href, '/manga/')]",
                "//div[contains(@class, 'list')]//div[contains(@class, 'item')]",
                "//div[contains(@class, 'content')]//div[contains(@class, 'item')]",
                "//div[contains(@class, 'main')]//div[contains(@class, 'item')]",
                "//div[contains(@class, 'container')]//div[contains(@class, 'item')]",
                "//div[contains(@class, 'wrapper')]//div[contains(@class, 'item')]",
                "//div[contains(@class, 'box')]",
                "//div[contains(@class, 'item')]",
                "//div[contains(@class, 'comic-item')]",
                "//div[contains(@class, 'manga-item')]",
            ]

            comics_found = []

            for selector in selectors:
                try:
                    elements = self.driver.find_elements(By.XPATH, selector)
                    # Require more than one match so we don't latch onto a
                    # single unrelated container.
                    if len(elements) > 1:
                        print(f"使用选择器 '{selector}' 找到 {len(elements)} 个元素")
                        comics_found = elements
                        break
                except Exception:
                    continue

            if not comics_found:
                # Nothing matched: dump diagnostics to help find selectors.
                print("未找到漫画元素，分析页面结构...")
                self.analyze_page_structure()
                return []

            print(f"找到 {len(comics_found)} 个漫画项目")

            for i, comic in enumerate(comics_found):
                try:
                    comic_info = self.extract_single_comic(comic, i)
                    if comic_info:
                        self.comics_data.append(comic_info)
                except Exception as e:
                    print(f"提取第 {i+1} 个漫画信息时出错: {e}")
                    continue

            print(f"成功提取 {len(self.comics_data)} 个漫画信息")

        except Exception as e:
            print(f"提取漫画信息时出错: {e}")

    def extract_single_comic(self, comic_element, index):
        """Extract title/url/image/description/tags from one comic element.

        Args:
            comic_element: Selenium WebElement wrapping one comic entry.
            index (int): zero-based position in the found list.

        Returns:
            dict | None: the extracted info, or None on unexpected failure.
            Missing individual fields are left at their defaults.
        """
        try:
            comic_info = {
                'index': index + 1,
                'title': '',
                'url': '',
                'image': '',
                'description': '',
                'tags': [],
                'timestamp': datetime.now().isoformat()
            }

            # Title: first heading- or title-classed descendant with text.
            title_selectors = [
                ".//h1", ".//h2", ".//h3", ".//h4", ".//h5", ".//h6",
                ".//div[contains(@class, 'title')]", ".//span[contains(@class, 'title')]",
                ".//a[contains(@class, 'title')]"
            ]

            for selector in title_selectors:
                try:
                    title_elem = comic_element.find_element(By.XPATH, selector)
                    if title_elem.text.strip():
                        comic_info['title'] = title_elem.text.strip()
                        break
                except NoSuchElementException:
                    continue

            # Link: direct <a>, then parent's <a>, then any descendant <a>.
            try:
                link_elem = comic_element.find_element(By.TAG_NAME, "a")
                comic_info['url'] = link_elem.get_attribute("href")
            except NoSuchElementException:
                try:
                    parent = comic_element.find_element(By.XPATH, "./..")
                    link_elem = parent.find_element(By.TAG_NAME, "a")
                    comic_info['url'] = link_elem.get_attribute("href")
                except NoSuchElementException:
                    try:
                        link_elem = comic_element.find_element(By.XPATH, ".//a")
                        comic_info['url'] = link_elem.get_attribute("href")
                    except NoSuchElementException:
                        pass

            # Cover image.
            try:
                img_elem = comic_element.find_element(By.TAG_NAME, "img")
                comic_info['image'] = img_elem.get_attribute("src")
            except NoSuchElementException:
                pass

            # Description: first paragraph/desc/summary descendant with text.
            desc_selectors = [
                ".//p", ".//div[contains(@class, 'desc')]", ".//span[contains(@class, 'desc')]",
                ".//div[contains(@class, 'summary')]"
            ]

            for selector in desc_selectors:
                try:
                    desc_elem = comic_element.find_element(By.XPATH, selector)
                    if desc_elem.text.strip():
                        comic_info['description'] = desc_elem.text.strip()
                        break
                except NoSuchElementException:
                    continue

            # Tags: every non-empty tag-classed span/div.
            try:
                tag_elements = comic_element.find_elements(By.XPATH, ".//span[contains(@class, 'tag')] | .//div[contains(@class, 'tag')]")
                comic_info['tags'] = [tag.text.strip() for tag in tag_elements if tag.text.strip()]
            except Exception:
                pass

            return comic_info

        except Exception as e:
            print(f"提取漫画 {index + 1} 信息时出错: {e}")
            return None

    def analyze_page_structure(self):
        """Dump diagnostics (keywords, element counts, class frequencies,
        candidate containers) and save the page source for offline study."""
        print("分析页面结构...")

        try:
            title = self.driver.title
            print(f"页面标题: {title}")

            page_source = self.driver.page_source

            # Which interesting keywords appear anywhere in the markup?
            keywords = ['comic', 'manga', '漫画', 'title', 'image', 'description', 'item', 'list', 'grid', 'card']
            for keyword in keywords:
                if keyword in page_source.lower():
                    print(f"页面包含关键词: {keyword}")

            # Bug fix: ensure the output directory exists before writing,
            # matching save_data()/take_screenshot().
            os.makedirs('downloads', exist_ok=True)
            with open('downloads/page_source.html', 'w', encoding='utf-8') as f:
                f.write(page_source)
            print("页面源码已保存到 downloads/page_source.html")

            divs = self.driver.find_elements(By.TAG_NAME, "div")
            print(f"页面包含 {len(divs)} 个div元素")

            links = self.driver.find_elements(By.TAG_NAME, "a")
            print(f"页面包含 {len(links)} 个链接")

            images = self.driver.find_elements(By.TAG_NAME, "img")
            print(f"页面包含 {len(images)} 个图片")

            # Tally class attributes of the first 20 divs to spot patterns.
            print("\n分析div元素的class属性:")
            class_counts = {}
            for div in divs[:20]:
                try:
                    class_name = div.get_attribute("class")
                    if class_name:
                        class_counts[class_name] = class_counts.get(class_name, 0) + 1
                except Exception:
                    pass

            for class_name, count in sorted(class_counts.items(), key=lambda x: x[1], reverse=True):
                print(f"  {class_name}: {count} 个")

            # Probe common container selectors and preview their text.
            print("\n尝试查找内容容器:")
            content_selectors = [
                "//div[contains(@class, 'content')]",
                "//div[contains(@class, 'main')]",
                "//div[contains(@class, 'container')]",
                "//div[contains(@class, 'wrapper')]",
                "//div[contains(@class, 'app')]",
                "//div[contains(@id, 'app')]"
            ]

            for selector in content_selectors:
                try:
                    elements = self.driver.find_elements(By.XPATH, selector)
                    if elements:
                        print(f"  找到 {selector}: {len(elements)} 个")
                        for elem in elements[:3]:  # preview the first 3 only
                            try:
                                print(f"    文本: {elem.text[:100]}...")
                            except Exception:
                                pass
                except Exception:
                    pass

        except Exception as e:
            print(f"分析页面结构时出错: {e}")

    def save_data(self, filename=None):
        """Write ``self.comics_data`` as pretty-printed JSON under downloads/.

        Args:
            filename (str): output file name; defaults to a timestamped name.

        Returns:
            str | None: path written, or None on failure.
        """
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"comics_data_{timestamp}.json"

        try:
            os.makedirs('downloads', exist_ok=True)
            filepath = os.path.join('downloads', filename)

            with open(filepath, 'w', encoding='utf-8') as f:
                json.dump(self.comics_data, f, ensure_ascii=False, indent=2)

            print(f"数据已保存到: {filepath}")
            print(f"共保存 {len(self.comics_data)} 条漫画信息")

            return filepath

        except Exception as e:
            print(f"保存数据时出错: {e}")
            return None

    def take_screenshot(self, filename=None):
        """Save a screenshot of the current page under downloads/.

        Args:
            filename (str): output file name; defaults to a timestamped name.

        Returns:
            str | None: path written, or None on failure.
        """
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"comics_page_{timestamp}.png"

        try:
            os.makedirs('downloads', exist_ok=True)
            filepath = os.path.join('downloads', filename)

            self.driver.save_screenshot(filepath)
            print(f"截图已保存到: {filepath}")

            return filepath

        except Exception as e:
            print(f"截图时出错: {e}")
            return None

    def run(self):
        """Run the full pipeline: login, load page, scroll, extract, save.

        Returns:
            bool: True on success, False on any failure.
        """
        print("开始运行漫画爬虫...")

        try:
            if not self.setup_driver():
                return False

            print(f"正在访问: {self.base_url}")
            self.driver.get(self.base_url)

            if not self.wait_for_page_load():
                return False

            # Screenshot before scrolling, for comparison.
            self.take_screenshot("main_screenshot.png")

            self.scroll_to_load_more()

            # Screenshot after all lazy content has loaded.
            self.take_screenshot("loaded_screenshot.png")

            self.extract_comic_info()

            if self.comics_data:
                self.save_data("main_scraped_data.json")
            else:
                print("未获取到任何漫画数据")

            return True

        except Exception as e:
            print(f"运行爬虫时出错: {e}")
            return False

        finally:
            # Bug fix: delegate teardown to close() so LoginManager's own
            # cleanup runs too, instead of quitting the driver directly.
            self.close()

    def close(self):
        """Release browser resources via LoginManager when present,
        otherwise quit the raw driver."""
        if self.login_manager:
            self.login_manager.close()
        elif self.driver:
            self.driver.quit()
            print("浏览器已关闭")

def main():
    """Entry point: build a ComicScraper with credentials and run it.

    Credentials are read from the COMIC_USERNAME / COMIC_PASSWORD
    environment variables when set, falling back to the historical
    hard-coded values for backward compatibility.
    """
    print("=" * 50)
    print("漫画爬虫启动")
    print("=" * 50)

    # NOTE(review): hard-coded credentials in source are a security risk —
    # prefer setting the environment variables instead of relying on the
    # fallback values below.
    username = os.environ.get("COMIC_USERNAME", "wangyukunop")
    password = os.environ.get("COMIC_PASSWORD", "zxw123456")

    # Run with a visible browser window so login/ads can be observed.
    scraper = ComicScraper(headless=False, username=username, password=password)

    try:
        success = scraper.run()

        if success:
            print("\n爬虫运行完成！")
            print(f"共获取 {len(scraper.comics_data)} 条漫画信息")
        else:
            print("\n爬虫运行失败！")

    except KeyboardInterrupt:
        print("\n用户中断爬虫运行")
    except Exception as e:
        print(f"\n运行出错: {e}")
    finally:
        # Always release the browser, even on interrupt/error.
        scraper.close()

if __name__ == "__main__":
    main()
