import requests
from bs4 import BeautifulSoup
import re
import os
import xlwt
from datetime import datetime
from typing import List, Dict, Optional

# Selenium-related imports
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
import time


class Book:
    """Data holder for one book scraped from Douban's Top250 list."""

    def __init__(self, rank: int = 0):
        # Position on the Top250 list (1-based; 0 means "not assigned yet").
        self.rank = rank
        self.title: str = ""            # book title
        self.url: str = ""              # link to the book's detail page
        self.img_url: str = ""          # cover image URL
        self.rating: float = 0.0        # Douban rating score
        self.rating_count: str = ""     # number of ratings, e.g. "12345人评价"
        self.price: str = ""            # price parsed from the publication line
        self.pub_info: str = ""         # author / translator / publisher / date
        self.intro: str = ""            # one-line introduction
        self.img_local_path: str = ""   # local path of the downloaded cover
        self.download_success: bool = False  # whether the cover download worked

    def to_dict(self) -> Dict:
        """Return all fields as a dict keyed by the Chinese column names."""
        status = '成功' if self.download_success else '失败'
        pairs = [
            ('排名', self.rank),
            ('书名', self.title),
            ('图书链接', self.url),
            ('封面URL', self.img_url),
            ('评分', self.rating),
            ('评价人数', self.rating_count),
            ('价格', self.price),
            ('完整出版信息', self.pub_info),
            ('简介', self.intro),
            ('本地图片路径', self.img_local_path),
            ('图片下载状态', status),
        ]
        return dict(pairs)

    def __str__(self):
        """Render a multi-line, emoji-decorated summary card."""
        blurb = self.intro if self.intro else '暂无简介'
        mark = '✅' if self.download_success else '❌'
        cover = os.path.basename(self.img_local_path) if self.img_local_path else '无图片'
        return f"""
📚 图书 #{self.rank}
📖 书名: {self.title}
⭐ 评分: {self.rating}
👥 评价: {self.rating_count}
📋 完整出版信息: {self.pub_info}
💰 价格: {self.price}
📝 简介: {blurb}
🖼️ 图片: {mark} {cover}
🔗 链接: {self.url}
        """


class DoubanBookSeleniumSpider:
    """Douban Books Top250 spider (Selenium + requests + BeautifulSoup).

    Selenium drives Chrome to render the listing pages, BeautifulSoup
    parses the rendered HTML, and requests downloads the cover images.
    """

    def __init__(self, headless: bool = True):
        """
        Initialize the spider and start the Selenium WebDriver.

        :param headless: run the browser without a visible window when True
        :raises Exception: re-raised from WebDriver startup failures
        """
        self.base_url = 'https://book.douban.com/top250'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'
        }
        # Covers are saved under the current working directory.
        self.img_folder = os.path.join(os.getcwd(), 'book_covers_selenium')
        self.books: List["Book"] = []

        # Start the Selenium WebDriver (may raise; see _setup_selenium).
        self._setup_selenium(headless)

    def _setup_selenium(self, headless: bool):
        """Create and configure the Chrome WebDriver plus the shared wait."""
        try:
            chrome_options = Options()
            if headless:
                chrome_options.add_argument('--headless')
            # Disable sandbox / dev-shm / GPU for container friendliness,
            # fix the window size, and reuse the requests User-Agent.
            chrome_options.add_argument('--no-sandbox')
            chrome_options.add_argument('--disable-dev-shm-usage')
            chrome_options.add_argument('--disable-gpu')
            chrome_options.add_argument('--window-size=1920,1080')
            chrome_options.add_argument(f'--user-agent={self.headers["User-Agent"]}')

            # Rely on Selenium locating chromedriver on the system PATH.
            # If that fails, pass an explicit path instead, e.g.:
            # driver_path = r'D:\example\chromedriver-win64\chromedriver.exe'
            self.driver = webdriver.Chrome(options=chrome_options)

            print("🤖 Selenium WebDriver 初始化成功 (使用自动查找)")
            print("⚠️ 如果初始化失败，请手动设置driver_path路径")

            # Shared explicit wait (10s) used for page-load conditions.
            self.wait = WebDriverWait(self.driver, 10)

        except Exception as e:
            print(f"❌ Selenium初始化失败: {e}")
            print("💡 提示：请确保已安装ChromeDriver")
            print("📍 解决方案:")
            print("   1. 下载ChromeDriver: https://googlechromelabs.github.io/chrome-for-testing/")
            print("   2. 将chromedriver.exe添加到系统PATH")
            print("   3. 或者在代码中手动指定driver_path路径")
            raise

    def close_driver(self):
        """Quit the Selenium WebDriver if it was ever created.

        Uses getattr so this is safe to call even when _setup_selenium
        failed before self.driver was assigned (main()'s finally block
        calls this on a half-constructed instance in that case).
        """
        driver = getattr(self, 'driver', None)
        if driver:
            try:
                driver.quit()
                print("🔚 Selenium WebDriver 已关闭")
            except Exception as e:
                print(f"⚠️ 关闭WebDriver时出错: {e}")

    def scrape_books(self, max_pages: int = 3) -> List["Book"]:
        """
        Scrape Douban Books Top250 pages with Selenium.

        :param max_pages: maximum number of 25-book pages to fetch
        :return: the accumulated list of Book objects (also kept on self.books)
        """
        print("🚀 开始使用Selenium爬取豆瓣图书Top250信息...")
        print(f"📄 计划爬取页数: {max_pages}")

        current_page = 1
        current_rank = 1

        while current_page <= max_pages:
            try:
                # Each page shows 25 books; pagination uses ?start=25*(page-1).
                if current_page == 1:
                    page_url = self.base_url
                else:
                    page_url = f"{self.base_url}?start={(current_page-1)*25}"

                print(f"📄 爬取第 {current_page} 页: {page_url}")

                # Navigate with Selenium and wait for the book rows to render.
                self.driver.get(page_url)
                self.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, 'tr.item')))

                # Parse the rendered DOM with BeautifulSoup.
                html_content = self.driver.page_source
                soup = BeautifulSoup(html_content, 'html.parser')

                # Each book lives in a <tr class="item"> row.
                book_blocks = soup.find_all('tr', class_='item')
                print(f"📊 第 {current_page} 页找到 {len(book_blocks)} 本书")

                # No rows means we walked past the last page.
                if len(book_blocks) == 0:
                    print(f"⚠️ 第 {current_page} 页没有找到图书信息，可能已到最后一页")
                    break

                # Extract each book; ranks only advance on successful parses.
                for book_block in book_blocks:
                    book = self._extract_book_info(book_block, current_rank)
                    if book:
                        self.books.append(book)
                        current_rank += 1

                print(f"✅ 第 {current_page} 页处理完成")
                current_page += 1

                # Be polite: pause between pages to avoid hammering the site.
                time.sleep(2)

            except TimeoutException:
                print(f"❌ 第 {current_page} 页加载超时")
                break
            except Exception as e:
                print(f"❌ 处理第 {current_page} 页时出错: {e}")
                break

        print(f"🎉 Selenium爬取完成，共获取 {len(self.books)} 本图书信息")
        return self.books

    def _extract_book_info(self, book_block, rank: int) -> Optional["Book"]:
        """Parse one <tr class="item"> row into a Book, or None on error.

        :param book_block: BeautifulSoup element for one book row
        :param rank: 1-based rank to assign to the parsed book
        """
        try:
            book = Book(rank)

            # Title and detail-page link (whitespace squeezed out of the title).
            title_link = book_block.select_one('div.pl2 a')
            book.title = title_link.get_text(strip=True).replace('\n', '').replace(' ', '') if title_link else ""
            book.url = title_link.get('href', '') if title_link else ""

            # Cover image URL.
            img_element = book_block.select_one('img')
            book.img_url = img_element.get('src', '') if img_element else ""

            # Rating; a malformed number raises and is caught below.
            rating_element = book_block.select_one('span.rating_nums')
            book.rating = float(rating_element.get_text(strip=True)) if rating_element else 0.0

            # Rating count, e.g. "(12345人评价)" -> "12345人评价".
            rating_count_element = book_block.select_one('span.pl')
            if rating_count_element:
                text = rating_count_element.get_text(strip=True)
                match = re.search(r'(\d+)', text)
                book.rating_count = f"{match.group(1)}人评价" if match else text
            else:
                book.rating_count = ""

            # Optional one-line introduction.
            intro_element = book_block.select_one('span.inq')
            book.intro = intro_element.get_text(strip=True) if intro_element else ""

            # Publication line (author / publisher / date / price).
            self._extract_book_details(book_block, book)

            return book

        except Exception as e:
            print(f"❌ 解析第{rank}本图书时出错: {e}")
            return None

    def _extract_book_details(self, book_block, book: "Book"):
        """Fill book.pub_info and book.price from the <p class="pl"> line."""
        pub_info_element = book_block.select_one('p.pl')

        if pub_info_element:
            pub_info_text = pub_info_element.get_text(strip=True)
            book.pub_info, book.price = self._split_pub_info(pub_info_text)

    def _split_pub_info(self, pub_text: str) -> tuple:
        """Split the publication line into (publication info, price).

        :param pub_text: e.g. "作者 / 出版社 / 2000-1 / 29.80元"
        :return: (info without the price, price string) — price is "" when absent
        """
        # Match a trailing number, optionally followed by 元.  NOTE: a bare
        # trailing number (no 元) is also treated as a price by design.
        price_pattern = r'(\d+(?:\.\d+)?)(?:\s*元)?\s*$'

        match = re.search(price_pattern, pub_text)

        if match:
            price = match.group(1)
            # Drop the price and the " / " separator before it.
            pub_info = pub_text[:match.start()].rstrip(' /')
            return pub_info, price
        else:
            # No price found: return the whole line unchanged.
            return pub_text, ""

    def download_covers(self, books: Optional[List["Book"]] = None) -> None:
        """Download cover images for the given books (defaults to self.books)."""
        if books is None:
            books = self.books

        print("\n🖼️  开始下载图书封面图片...")

        # Create the image folder; exist_ok avoids a create/create race.
        if not os.path.exists(self.img_folder):
            os.makedirs(self.img_folder, exist_ok=True)
            print(f"📁 创建图片文件夹: {self.img_folder}")

        for book in books:
            if book.img_url:
                success = self._download_single_image(book)
                book.download_success = success
                print(f"{'✅' if success else '❌'} {book.title} - 封面下载{'成功' if success else '失败'}")
            else:
                print(f"⚠️ {book.title} - 无封面图片URL")

    def _download_single_image(self, book: "Book") -> bool:
        """Download one cover image; return True and set img_local_path on success."""
        try:
            # Build a Windows-safe file name like "01_书名.jpg".
            clean_title = self._clean_filename(book.title)
            img_filename = f"{book.rank:02d}_{clean_title}.jpg"
            img_path = os.path.join(self.img_folder, img_filename)

            # Plain requests download (Selenium is not needed for images).
            response = requests.get(book.img_url, headers=self.headers, timeout=10)
            if response.status_code == 200:
                with open(img_path, 'wb') as f:
                    f.write(response.content)
                book.img_local_path = img_path
                return True
            else:
                print(f"❌ 图片下载失败，状态码: {response.status_code}")
                return False

        except Exception as e:
            print(f"❌ 图片下载出错: {e}")
            return False

    def _clean_filename(self, filename: str) -> str:
        """Strip characters Windows forbids in file names and cap the length."""
        invalid_chars = r'[<>:"/\\|?*]'
        cleaned = re.sub(invalid_chars, '', filename)

        # Keep file names reasonably short.
        if len(cleaned) > 50:
            cleaned = cleaned[:50]

        return cleaned

    def export_to_excel(self, books: Optional[List["Book"]] = None, filename: Optional[str] = None) -> str:
        """Export the books to an .xls file; return its path or "" on failure.

        :param books: books to export (defaults to self.books)
        :param filename: output file name (defaults to a timestamped name)
        """
        if books is None:
            books = self.books

        if not books:
            print("❌ 没有图书数据可导出")
            return ""

        if filename is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"豆瓣图书Top250_Selenium_{timestamp}.xls"

        try:
            workbook = xlwt.Workbook(encoding='utf-8')
            worksheet = workbook.add_sheet('豆瓣图书Top250')

            # Header row; keys must match Book.to_dict().
            headers = ['排名', '书名', '图书链接', '封面URL', '评分', '评价人数',
                      '价格', '完整出版信息', '简介', '本地图片路径', '图片下载状态']

            for col, header in enumerate(headers):
                worksheet.write(0, col, header)

            # Data rows start at row 1, below the header.
            for row, book in enumerate(books, 1):
                book_data = book.to_dict()
                for col, header in enumerate(headers):
                    worksheet.write(row, col, str(book_data[header]))

            # Save next to the script (current working directory).
            excel_path = os.path.join(os.getcwd(), filename)
            workbook.save(excel_path)
            print(f"\n📄 数据已导出到: {excel_path}")
            return excel_path

        except Exception as e:
            print(f"❌ Excel导出失败: {e}")
            return ""

    def print_books_info(self, books: Optional[List["Book"]] = None) -> None:
        """Pretty-print the books to the console (defaults to self.books)."""
        if books is None:
            books = self.books

        if not books:
            print("❌ 没有图书数据可显示")
            return

        print("\n" + "=" * 80)
        print("📚 豆瓣图书Top250信息汇总 (Selenium+requests+BeautifulSoup版本)")
        print("=" * 80)

        for book in books:
            print(book)
            print("-" * 80)


def main():
    """Program entry point: prompt for settings, scrape, report, and export."""
    print("📚 豆瓣图书Top250爬虫程序 (Selenium+requests+BeautifulSoup版本)")
    print("=" * 50)

    # Read scraping parameters from the console, falling back to defaults on
    # bad or missing input.  Only input/parsing errors are swallowed here —
    # the original bare except also hid KeyboardInterrupt/SystemExit.
    try:
        max_pages = int(input("请输入要爬取的页数 (默认为3): ").strip() or "3")
        headless = input("是否使用无头模式? (y/n，默认为y): ").strip().lower() != 'n'
    except (ValueError, EOFError):
        max_pages = 3
        headless = True

    print(f"🔧 爬取设置: {max_pages}页, 无头模式={'开启' if headless else '关闭'}")

    spider = None
    try:
        # 1. Create the Selenium spider (starts the WebDriver).
        spider = DoubanBookSeleniumSpider(headless=headless)

        # 2. Scrape the book data.
        books = spider.scrape_books(max_pages=max_pages)

        if not books:
            print("❌ 未获取到图书数据，程序结束")
            return

        # 3. Download cover images (disabled by default).
        # spider.download_covers()

        # 4. Print the scraped books.
        spider.print_books_info()

        # 5. Export to Excel.
        excel_file = spider.export_to_excel()

        # 6. Summary statistics.
        success_count = sum(1 for book in books if book.download_success)
        print(f"\n📊 爬取统计:")
        print(f"📚 总图书数: {len(books)}")
        print(f"🖼️  封面下载成功: {success_count}/{len(books)}")
        print(f"📄 Excel文件: {excel_file}")

        print("\n🎉 Selenium爬取程序执行完成！")

    except Exception as e:
        print(f"❌ 程序执行出错: {e}")

    finally:
        # Always release the browser, even after a failure part-way through.
        if spider:
            spider.close_driver()

# Run the spider only when executed as a script, not when imported.
if __name__ == "__main__":
    main()