import time
import random
import csv
import os
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options

# Configuration parameters
CHROME_DRIVER_PATH = r"C:\Users\25089\Desktop\学习\毕设v2\爬虫\当当网爬虫\chromedriver.exe"  # ChromeDriver path
CATEGORY_CSV = "分类表数据.csv"  # category table CSV file (input)
OUTPUT_FOLDER = "图书信息"       # output folder for per-category book CSVs
PROGRESS_FILE = "爬取进度.csv"   # progress-tracking CSV file

# Browser configuration (headless mode)
chrome_options = Options()
# Enable headless mode (key setting)
chrome_options.add_argument("--headless=new")  # newest headless-mode syntax
chrome_options.add_argument("--disable-gpu")   # disable GPU acceleration (recommended with headless)
chrome_options.add_argument("--window-size=1920,1080")  # fixed window size to avoid element-location issues

# Additional settings to reduce automation detection
chrome_options.add_argument("--disable-blink-features=AutomationControlled")
chrome_options.add_argument("--disable-extensions")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-dev-shm-usage")

# Pool of user-agent strings; one is chosen at random per process start
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/113.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15"
]
chrome_options.add_argument(f"user-agent={random.choice(USER_AGENTS)}")

def create_driver():
    """Build and return a headless Chrome WebDriver using the module-level options."""
    return webdriver.Chrome(
        service=Service(executable_path=CHROME_DRIVER_PATH),
        options=chrome_options,
    )

def scroll_to_bottom(driver):
    """Jump straight to the bottom of the page, then pause so lazy content can load.

    Args:
        driver: Selenium WebDriver with a page already loaded.
    """
    print("滚动页面至底部...")
    # One direct jump to the bottom of the document
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    # Randomized pause gives dynamically loaded content time to appear
    pause = random.uniform(3, 5)
    print(f"等待 {pause:.2f} 秒让内容加载...")
    time.sleep(pause)

def _parse_book_anchor(anchor, category_id):
    """Extract one book record from a single <a> element; raises if a part is missing."""
    book_url = anchor.get_attribute('href')
    # Cover image lives inside a .bookcover span; skip promotion-label images
    cover_holder = anchor.find_element(By.CSS_SELECTOR, '.bookcover')
    cover_imgs = cover_holder.find_elements(By.CSS_SELECTOR, 'img:not(.promotion_label)')
    cover_url = cover_imgs[0].get_attribute('src') if cover_imgs else ''
    # Title/author/description all live under the .bookinfo container
    info = anchor.find_element(By.CSS_SELECTOR, '.bookinfo')
    return {
        'category_id': category_id,
        '书名': info.find_element(By.CSS_SELECTOR, '.title').text.strip(),
        '作者': info.find_element(By.CSS_SELECTOR, '.author').text.strip(),
        '描述': info.find_element(By.CSS_SELECTOR, '.des').text.strip(),
        '封面链接': cover_url,
        '图书链接': book_url,
    }

def extract_book_info(driver, category_id):
    """Scrape all book entries from the currently loaded category page.

    Args:
        driver: Selenium WebDriver with the category page loaded.
        category_id: Identifier attached to every scraped record.

    Returns:
        List of dicts with keys: category_id, 书名, 作者, 描述, 封面链接, 图书链接.
        Books that fail to parse are skipped with a printed warning.
    """
    records = []
    try:
        anchors = driver.find_elements(By.CSS_SELECTOR, '#book_list a')
        print(f"找到 {len(anchors)} 个图书元素")
        total = len(anchors)
        for idx, anchor in enumerate(anchors, 1):
            try:
                record = _parse_book_anchor(anchor, category_id)
                # Progress message every 10 books and on the final one
                if idx % 10 == 0 or idx == total:
                    print(f"  已提取 {idx}/{total} 本图书信息")
                records.append(record)
            except Exception as e:
                # Best-effort: a broken entry must not abort the whole page
                print(f"提取第 {idx} 本图书信息时出错: {e}")
                continue
    except Exception as e:
        print(f"查找图书元素时出错: {e}")
    return records

def save_books_to_csv(books, category_name):
    """Append book records to a per-category CSV file under OUTPUT_FOLDER.

    Args:
        books: List of dicts keyed by the column names below.
        category_name: Category display name; used (after sanitizing) as the file name.

    Returns:
        Path of the CSV file the records were appended to.
    """
    # exist_ok avoids the check-then-create race of a separate exists() test
    os.makedirs(OUTPUT_FOLDER, exist_ok=True)

    # Replace characters that are illegal in Windows file names
    invalid_chars = '/\\:*?"<>|'
    for char in invalid_chars:
        category_name = category_name.replace(char, '_')
    filepath = os.path.join(OUTPUT_FOLDER, f"{category_name}.csv")

    fieldnames = ['category_id', '书名', '作者', '描述', '封面链接', '图书链接']
    # Write the header whenever the file is absent OR empty (e.g. left behind
    # by a crashed run) — checking existence alone could produce a headerless CSV.
    needs_header = not os.path.isfile(filepath) or os.path.getsize(filepath) == 0

    with open(filepath, 'a', newline='', encoding='utf-8-sig') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        if needs_header:
            writer.writeheader()
        writer.writerows(books)

    return filepath

def read_category_csv():
    """Read category rows from CATEGORY_CSV.

    Returns:
        List of dicts with stripped 'id', 'url', 'name' values; rows with any
        blank field are skipped. Returns [] when the file is missing, empty,
        or lacks the required columns.
    """
    if not os.path.exists(CATEGORY_CSV):
        print(f"错误：分类表文件 '{CATEGORY_CSV}' 不存在")
        return []

    categories = []
    with open(CATEGORY_CSV, 'r', encoding='utf-8-sig') as f:
        reader = csv.DictReader(f)
        required_fields = ['id', 'url', 'name']
        # fieldnames is None for a completely empty file — the previous version
        # raised TypeError on the membership test in that case.
        if reader.fieldnames is None or not all(field in reader.fieldnames for field in required_fields):
            print(f"错误：分类表文件必须包含 {required_fields} 列")
            return []

        for row in reader:
            # Keep only rows where all three fields are non-empty
            if row.get('id') and row.get('url') and row.get('name'):
                categories.append({
                    'id': row['id'].strip(),
                    'url': row['url'].strip(),
                    'name': row['name'].strip()
                })

    print(f"成功读取 {len(categories)} 条有效分类数据")
    return categories

def load_progress():
    """Return the set of category_ids already recorded in PROGRESS_FILE.

    Returns:
        Set of stripped category_id strings; empty set when the file is
        missing, empty, or lacks a 'category_id' column.
    """
    progress = set()
    if os.path.exists(PROGRESS_FILE):
        with open(PROGRESS_FILE, 'r', encoding='utf-8-sig') as f:
            reader = csv.DictReader(f)
            # fieldnames is None for an empty file — guard before the
            # membership test (the previous version raised TypeError here).
            if reader.fieldnames and 'category_id' in reader.fieldnames:
                for row in reader:
                    progress.add(row['category_id'].strip())
    print(f"已爬取的分类数量: {len(progress)}")
    return progress

def update_progress(category_id, category_name, status, message=""):
    """Append one status row to the progress CSV.

    Args:
        category_id: Id of the processed category.
        category_name: Display name of the category.
        status: 'success' or 'failed'.
        message: Optional detail (error text or note).
    """
    fieldnames = ['category_id', 'category_name', 'status', 'timestamp', 'message']
    # Header is needed when the file is absent OR empty (e.g. created but not
    # written by a crashed run) — existence alone is not enough.
    needs_header = not os.path.exists(PROGRESS_FILE) or os.path.getsize(PROGRESS_FILE) == 0

    with open(PROGRESS_FILE, 'a', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        if needs_header:
            writer.writeheader()

        writer.writerow({
            'category_id': category_id,
            'category_name': category_name,
            'status': status,  # 'success' or 'failed'
            'timestamp': time.strftime("%Y-%m-%d %H:%M:%S"),
            'message': message
        })

def main():
    """Orchestrate the crawl: read categories, skip completed ones, scrape the rest.

    Side effects: writes per-category CSVs under OUTPUT_FOLDER, appends to
    PROGRESS_FILE, and drives a headless Chrome browser.
    """
    # Read the category table
    categories = read_category_csv()
    if not categories:
        return

    # Load already-scraped category ids
    completed_categories = load_progress()

    # Keep only categories not yet scraped
    remaining_categories = [
        cat for cat in categories
        if cat['id'] not in completed_categories
    ]

    print(f"待爬取的分类数量: {len(remaining_categories)}")
    if not remaining_categories:
        print("所有分类已爬取完成！")
        return

    # Verify the ChromeDriver binary exists before launching a browser
    if not os.path.exists(CHROME_DRIVER_PATH):
        print(f"错误: 未找到ChromeDriver驱动程序，请检查路径: {CHROME_DRIVER_PATH}")
        return

    driver = None
    try:
        driver = create_driver()

        # Process each remaining category
        for idx, category in enumerate(remaining_categories, 1):
            category_id = category['id']
            category_name = category['name']
            category_url = category['url']

            print(f"\n===== 处理第 {idx}/{len(remaining_categories)} 个分类 (名称: {category_name}, ID: {category_id}) =====")
            print(f"访问URL: {category_url}")

            try:
                # Fix: only prepend "https:" for protocol-relative URLs ("//...").
                # The previous unconditional f'https:{category_url}' corrupted
                # URLs that already carried a scheme.
                if category_url.startswith(('http://', 'https://')):
                    driver.get(category_url)
                else:
                    driver.get(f'https:{category_url}')

                # Randomized wait to look less bot-like
                wait_time = random.uniform(3, 8)
                print(f"页面加载完成，等待 {wait_time:.2f} 秒...")
                time.sleep(wait_time)

                # Scroll so lazily-loaded books render
                scroll_to_bottom(driver)

                # Extract book records from the rendered page
                print("开始提取图书信息...")
                books = extract_book_info(driver, category_id)

                # Persist results and record progress (an empty page still
                # counts as success so it is not retried forever)
                if books:
                    filepath = save_books_to_csv(books, category_name)
                    print(f"分类 {category_name} 处理完成，提取 {len(books)} 本图书信息")
                    print(f"数据已保存到: {filepath}")
                    update_progress(category_id, category_name, "success")
                else:
                    print(f"分类 {category_name} 未提取到任何图书信息")
                    update_progress(category_id, category_name, "success", "未提取到图书信息")

            except Exception as e:
                # Record the failure and move on to the next category
                error_msg = str(e)
                print(f"处理分类 {category_name} 时出错: {error_msg}")
                update_progress(category_id, category_name, "failed", error_msg)
                continue

            # Pause between categories (skipped after the last one)
            if idx < len(remaining_categories):
                wait_time = random.uniform(8, 15)
                print(f"等待 {wait_time:.2f} 秒后处理下一个分类...")
                time.sleep(wait_time)

        print(f"\n本次处理完成！共处理 {len(remaining_categories)} 个分类")
        print(f"进度记录已保存到: {PROGRESS_FILE}")
        print(f"图书数据已保存到: {OUTPUT_FOLDER} 文件夹")

    except Exception as e:
        print(f"程序运行出错: {e}")
    finally:
        # Always release the browser, even on failure
        if driver:
            print("关闭浏览器...")
            driver.quit()

# Script entry point: run the scraper only when executed directly.
if __name__ == "__main__":
    main()