import requests
from bs4 import BeautifulSoup
import json
import time
import random
import os
import re
from tqdm import tqdm
import logging
from fake_useragent import UserAgent
import urllib.parse
import shutil
import hashlib

# Configure logging to a file; DEBUG level captures selector/parse detail.
logging.basicConfig(
    level=logging.DEBUG,  # DEBUG level for more diagnostic output
    format='%(asctime)s - %(levelname)s - %(message)s',
    filename='douban_books_spider.log'
)
logger = logging.getLogger(__name__)

# Create the data directory. exist_ok avoids the check-then-create race
# of the previous `if not os.path.exists(...)` pattern.
os.makedirs('data', exist_ok=True)

# Create the cover-image directory.
os.makedirs('images', exist_ok=True)

# Random User-Agent generator (fake_useragent)
ua = UserAgent()

# Proxy pool — add your own proxy servers here.
# Entry format: {'http': 'http://host:port', 'https': 'http://host:port'}
PROXIES = [
    None,  # direct connection, no proxy
    # Add your proxies, e.g.:
    # {'http': 'http://10.10.1.10:3128', 'https': 'http://10.10.1.10:1080'},
    # {'http': 'http://user:pass@10.10.1.10:3128', 'https': 'http://user:pass@10.10.1.10:1080'},
]

def get_random_proxy():
    """Return one entry from the proxy pool at random (None = no proxy)."""
    return random.choice(PROXIES)

# URL template for the book list pages; Top250 paginates via the `start` offset
base_url = 'https://book.douban.com/top250?start={}'

# Shared accumulator for every scraped book dict (read/written by all crawl,
# load and save functions in this module)
all_books = []

# Throttle helper: pause for a random duration between requests
def random_sleep(min_time=1, max_time=3):
    """Sleep for a uniformly random interval in [min_time, max_time] seconds."""
    delay = random.uniform(min_time, max_time)
    logger.debug(f"休眠 {delay:.2f} 秒")
    time.sleep(delay)

# Download a cover image and save it locally
def download_image(img_url, book_title):
    """Download a book's cover image and return the local file path.

    Files are named by the MD5 hash of the sanitized title to avoid
    filesystem encoding problems; a hash -> title mapping is appended to
    images/image_name_mapping.txt for human inspection.

    Returns the local path (also when the file already exists), or None
    when img_url is empty or the download fails.
    """
    if not img_url:
        logger.warning(f"图片URL为空，无法下载图片: {book_title}")
        return None

    try:
        # Strip characters that are illegal in file names
        safe_title = re.sub(r'[\\/*?:"<>|]', '', book_title)
        safe_title = safe_title.replace(' ', '_')

        # Hash the title so the file name is ASCII-safe
        title_hash = hashlib.md5(safe_title.encode('utf-8')).hexdigest()

        # Derive the extension from the URL path, defaulting to .jpg
        file_ext = os.path.splitext(urllib.parse.urlparse(img_url).path)[1]
        if not file_ext:
            file_ext = '.jpg'  # default extension

        local_path = os.path.join('images', f"{title_hash}{file_ext}")

        # Skip the download when the file is already cached
        if os.path.exists(local_path):
            logger.info(f"图片已存在: {local_path}")
            return local_path

        headers = {'User-Agent': ua.random}
        # Use a context manager so the connection is always released, and
        # iter_content so requests handles transfer decoding — the old
        # shutil.copyfileobj(response.raw, f) wrote raw (possibly gzipped)
        # bytes to disk. A timeout prevents an indefinite hang.
        with requests.get(img_url, headers=headers, stream=True, timeout=10) as response:
            response.raise_for_status()
            with open(local_path, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    f.write(chunk)

        # Record the original title so hashed file names stay traceable
        mapping_file = os.path.join('images', 'image_name_mapping.txt')
        with open(mapping_file, 'a', encoding='utf-8') as f:
            f.write(f"{title_hash}{file_ext} => {book_title}\n")

        logger.info(f"图片下载成功: {local_path}")
        return local_path

    except Exception as e:
        logger.error(f"下载图片时出错: {e}, URL: {img_url}")
        return None

# Fetch a page and parse it into a soup object
def fetch_page(url):
    """GET `url` with a random User-Agent and proxy; return BeautifulSoup or None."""
    try:
        request_headers = {'User-Agent': ua.random}
        chosen_proxy = get_random_proxy()

        logger.debug(f"请求URL: {url}，代理: {chosen_proxy}")

        response = requests.get(
            url,
            headers=request_headers,
            proxies=chosen_proxy,
            timeout=10,
        )
        response.raise_for_status()  # raise on HTTP error status
        response.encoding = 'utf-8'  # force the expected encoding

        return BeautifulSoup(response.text, 'html.parser')

    except requests.exceptions.RequestException as e:
        logger.error(f"请求失败: {e}")
        return None
    except Exception as e:
        logger.error(f"处理页面时出错: {e}")
        return None

# Crawl the book list pages
def crawl_book_list_pages():
    """Scrape all 10 pages of the Douban Top250 book list into all_books.

    For each list item this extracts url / cover / title / rating /
    review count / quote plus the author/publisher/year/price fields,
    downloads the cover image, and appends the dict to the module-level
    all_books list. Entries missing a title or URL are skipped.
    """
    logger.info("开始爬取书籍列表页...")
    print("开始爬取书籍列表页...")
    
    # The Top250 list spans 10 pages of 25 books each
    for page in tqdm(range(0, 10)):
        try:
            url = base_url.format(page * 25)
            soup = fetch_page(url)
            
            if not soup:
                logger.error(f"无法获取页面内容: {url}")
                continue
            
            # Log some page info for debugging
            page_title = soup.title.text if soup.title else "无标题"
            logger.debug(f"页面标题: {page_title}")
            
            # Check the page contains the expected structure
            book_items = soup.select('div.item')
            logger.debug(f"找到 {len(book_items)} 个书籍项")
            
            if not book_items:
                logger.warning(f"页面未找到书籍项，可能是选择器有误或页面结构变化: {url}")
                # Try to identify the page's main structure
                main_content = soup.select('div#content')
                if main_content:
                    logger.debug(f"页面存在content div: {len(main_content)}")
                    # Dump the page so its structure can be analysed offline
                    with open(f"debug_page_structure_{page}.html", "w", encoding="utf-8") as f:
                        f.write(str(soup))
                continue
            
            for item in book_items:
                try:
                    book_info = {}
                    
                    # Detail-page URL
                    link_element = item.select_one('div.pic a')
                    if link_element:
                        book_info['url'] = link_element['href']
                        logger.debug(f"解析到书籍链接: {book_info['url']}")
                    else:
                        logger.warning("未找到书籍链接")
                    
                    # Cover image URL
                    img_element = item.select_one('div.pic img')
                    if img_element and 'src' in img_element.attrs:
                        book_info['img_url'] = img_element['src']
                    else:
                        logger.warning("未找到书籍封面")
                    
                    # Title, with all whitespace removed
                    title_element = item.select_one('div.pl2 a')
                    if title_element:
                        book_info['title'] = title_element.text.strip().replace('\n', '').replace(' ', '')
                        logger.debug(f"解析到书籍标题: {book_info['title']}")
                        # Drop any subtitle after an ASCII ':'
                        if ':' in book_info['title']:
                            book_info['title'] = book_info['title'].split(':')[0]
                    else:
                        logger.warning("未找到书籍标题")
                    
                    # Rating
                    rating_element = item.select_one('span.rating_nums')
                    if rating_element:
                        book_info['rating'] = float(rating_element.text.strip())
                    else:
                        logger.warning("未找到评分")
                    
                    # Number of reviews
                    review_count_element = item.select_one('span.pl')
                    if review_count_element:
                        review_count_text = review_count_element.text.strip()
                        match = re.search(r'(\d+)人评价', review_count_text)
                        if match:
                            book_info['review_count'] = int(match.group(1))
                        else:
                            logger.warning(f"评价人数格式不匹配: {review_count_text}")
                    else:
                        logger.warning("未找到评价人数")
                    
                    # One-line quote
                    quote_element = item.select_one('p.quote span.inq')
                    if quote_element:
                        book_info['quote'] = quote_element.text.strip()
                    
                    # Basic info line: "author / publisher / year / price"
                    info_element = item.select_one('p.pl')
                    if info_element:
                        book_info['info'] = info_element.text.strip()
                        
                        # Split out author, publisher, publication year and price;
                        # the author keeps any embedded '/' separators
                        info_parts = book_info['info'].split('/')
                        if len(info_parts) >= 4:
                            book_info['author'] = '/'.join(info_parts[:-3]).strip()
                            book_info['publisher'] = info_parts[-3].strip()
                            book_info['publish_year'] = info_parts[-2].strip()
                            book_info['price'] = info_parts[-1].strip()
                        else:
                            logger.warning(f"基本信息格式不符合预期: {book_info['info']}")
                    else:
                        logger.warning("未找到基本信息")
                    
                    # Download the cover and record its local path
                    if 'img_url' in book_info and 'title' in book_info:
                        local_img_path = download_image(book_info['img_url'], book_info['title'])
                        if local_img_path:
                            book_info['local_img_path'] = local_img_path
                    
                    # Keep only entries with at least a title and a URL
                    if 'title' in book_info and 'url' in book_info:
                        all_books.append(book_info)
                        logger.debug(f"成功添加书籍: {book_info['title']}")
                    else:
                        logger.warning("书籍信息不完整，跳过")
                    
                except Exception as e:
                    logger.error(f"解析书籍列表项时出错: {e}")
                    print(f"解析书籍列表项时出错: {e}")
                    continue
            
            # Random delay to avoid hammering the server
            random_sleep(2, 5)
            
        except Exception as e:
            logger.error(f"爬取列表页 {page+1} 时出错: {e}")
            print(f"爬取列表页 {page+1} 时出错: {e}")
            continue
    
    logger.info(f"共爬取 {len(all_books)} 本书籍信息")
    print(f"共爬取 {len(all_books)} 本书籍信息")

# Parse a book detail page
def parse_book_detail(soup, book_info):
    """Extract detail-page fields into book_info (mutated in place).

    Fills categories, author, publisher, publish_year, price and the
    rating distribution when found. Publisher/year/price from the detail
    page overwrite list-page values; the author is only set when the list
    page did not already provide one.

    Args:
        soup: BeautifulSoup of the detail page.
        book_info: dict accumulated so far for this book.

    Returns:
        True on success, False if an unexpected error occurred.
    """
    try:
        # Category tags — try several possible selectors in turn
        category_selectors = [
            'div#db-tags-section a.tag',
            'div.tags a.tag',
            'div.indent a.tag'
        ]
        
        categories = []
        for selector in category_selectors:
            category_elements = soup.select(selector)
            categories = [elem.text.strip() for elem in category_elements]
            if categories:
                break
        
        if categories:
            book_info['categories'] = categories
        
        # Author links — try several possible selectors
        # NOTE(review): ':contains(...)' is a non-standard CSS extension;
        # the try/except below absorbs selectors the parser rejects.
        author_selectors = [
            'div#info a[href*="/author/"]',
            'div.indent a[href*="/author/"]',
            'div#info span.pl:contains("作者") + a'
        ]
        
        authors = []
        for selector in author_selectors:
            try:
                author_elements = soup.select(selector)
                authors = [elem.text.strip() for elem in author_elements]
                if authors:
                    break
            except:
                continue
        
        # Fall back to the list page's basic-info text when no links matched
        if not authors and 'info' in book_info:
            info_text = book_info['info']
            if '作者' in info_text or '著' in info_text:
                # Simple extraction; may need tuning for real pages
                author_match = re.search(r'(?:作者|著):\s*([^/]+)', info_text)
                if author_match:
                    authors = [author_match.group(1).strip()]
        
        # Only set the author when the list page did not already provide one
        if authors and not book_info.get('author'):
            book_info['author'] = '/'.join(authors)
        
        # Publisher — selector variants plus a regex over div#info's text
        publisher = None
        publisher_selectors = [
            'div#info span.pl:contains("出版社") + text()',
            'div#info span.pl:contains("出版社")',
            'div#info'
        ]
        
        for selector in publisher_selectors:
            try:
                if '+' in selector:  # special case: read the sibling text node
                    pl_element = soup.select_one(selector.split('+')[0])
                    if pl_element and pl_element.next_sibling:
                        publisher = pl_element.next_sibling.strip()
                        break
                else:
                    info_element = soup.select_one(selector)
                    if info_element:
                        info_text = info_element.text
                        publisher_match = re.search(r'出版社:\s*([^\n]+)', info_text)
                        if publisher_match:
                            publisher = publisher_match.group(1).strip()
                            break
            except:
                continue
        
        if publisher:
            book_info['publisher'] = publisher
        
        # Publication year — same multi-selector strategy
        publish_year = None
        year_selectors = [
            'div#info span.pl:contains("出版年") + text()',
            'div#info span.pl:contains("出版年")',
            'div#info'
        ]
        
        for selector in year_selectors:
            try:
                if '+' in selector:
                    pl_element = soup.select_one(selector.split('+')[0])
                    if pl_element and pl_element.next_sibling:
                        publish_year = pl_element.next_sibling.strip()
                        break
                else:
                    info_element = soup.select_one(selector)
                    if info_element:
                        info_text = info_element.text
                        year_match = re.search(r'出版年:\s*([^\n]+)', info_text)
                        if year_match:
                            publish_year = year_match.group(1).strip()
                            break
            except:
                continue
        
        if publish_year:
            book_info['publish_year'] = publish_year
        
        # List price — same multi-selector strategy
        price = None
        price_selectors = [
            'div#info span.pl:contains("定价") + text()',
            'div#info span.pl:contains("定价")',
            'div#info'
        ]
        
        for selector in price_selectors:
            try:
                if '+' in selector:
                    pl_element = soup.select_one(selector.split('+')[0])
                    if pl_element and pl_element.next_sibling:
                        price = pl_element.next_sibling.strip()
                        break
                else:
                    info_element = soup.select_one(selector)
                    if info_element:
                        info_text = info_element.text
                        price_match = re.search(r'定价:\s*([^\n]+)', info_text)
                        if price_match:
                            price = price_match.group(1).strip()
                            break
            except:
                continue
        
        if price:
            book_info['price'] = price
        
        # Rating distribution: star label -> vote count
        rating_distribution = {}
        rating_elements = soup.select('div.rating_wrap div.item')
        for elem in rating_elements:
            try:
                rating_text = elem.select_one('span.count').text.strip()
                rating_value = elem.select_one('span').text.strip()
                rating_distribution[rating_value] = int(rating_text.replace('人评价', ''))
            except:
                continue
        
        if rating_distribution:
            book_info['rating_distribution'] = rating_distribution
        
        return True
    
    except Exception as e:
        logger.error(f"解析书籍详情页内容时出错: {e}")
        print(f"解析书籍详情页内容时出错: {e}")
        return False

# Crawl each book's detail page
def crawl_book_detail_pages():
    """Visit every book URL in all_books and enrich it with detail-page data.

    Retries each page up to 5 times with a back-off sleep, checkpoints
    the data every 10 books via save_books_data(), and sleeps between
    books to avoid rate limiting. URLs are validated BEFORE fetching
    (the previous version fetched first and only then rejected invalid
    URLs, wasting up to max_retries requests each).
    """
    logger.info("开始爬取书籍详情页...")
    print("开始爬取书籍详情页...")

    for i, book in enumerate(tqdm(all_books)):
        try:
            # Skip books without a URL
            if 'url' not in book:
                logger.warning(f"书籍 {i+1} 没有URL，跳过")
                print(f"书籍 {i+1} 没有URL，跳过")
                continue

            # Validate the URL before spending any requests on it
            if 'book.douban.com/subject/' not in book['url']:
                logger.warning(f"无效的书籍URL: {book['url']}")
                continue

            # Retry loop
            max_retries = 5
            success = False

            for attempt in range(max_retries):
                try:
                    soup = fetch_page(book['url'])

                    if not soup:
                        # Back off before retrying a failed fetch (the old
                        # code retried immediately with no delay)
                        random_sleep(3, 6)
                        continue

                    # Parse the detail page into the book dict
                    success = parse_book_detail(soup, book)

                    if success:
                        logger.info(f"成功爬取书籍详情: {book.get('title', '未知')}")
                        break
                    else:
                        logger.warning(f"解析书籍详情失败，尝试 {attempt+1}/{max_retries}: {book.get('title', '未知')}")
                        random_sleep(3, 6)

                except Exception as e:
                    logger.error(f"爬取书籍详情页时出错: {e}, 尝试 {attempt+1}/{max_retries}")
                    print(f"爬取书籍详情页时出错: {e}, 尝试 {attempt+1}/{max_retries}")
                    random_sleep(3, 6)

            if not success:
                logger.error(f"达到最大重试次数，爬取书籍详情失败: {book.get('title', '未知')}")
                print(f"达到最大重试次数，爬取书籍详情失败: {book.get('title', '未知')}")

            # Random delay between books to avoid rate limiting
            random_sleep(3, 7)

            # Checkpoint: save after every 10 books
            if (i + 1) % 10 == 0:
                save_books_data()
                logger.info(f"已保存 {i+1} 本书籍数据")
                print(f"已保存 {i+1} 本书籍数据")

        except Exception as e:
            logger.error(f"爬取书籍详情页 {i+1}/{len(all_books)} 时出错: {e}")
            print(f"爬取书籍详情页 {i+1}/{len(all_books)} 时出错: {e}")
            continue

    # Save everything once crawling finishes
    save_books_data()
    logger.info("书籍详情页爬取完成")
    print("书籍详情页爬取完成")

# Persist all book data and derived statistics to JSON files under data/
def save_books_data():
    """Write books, category stats and author stats to data/*.json."""
    logger.info("保存书籍数据...")
    print("保存书籍数据...")

    # Copy each record, normalising local image paths to forward slashes
    # so the front end can use them directly.
    books_to_save = []
    for book in all_books:
        record = book.copy()
        if 'local_img_path' in record:
            record['local_img_path'] = record['local_img_path'].replace('\\', '/')
        books_to_save.append(record)

    with open('data/douban_books.json', 'w', encoding='utf-8') as f:
        json.dump(books_to_save, f, ensure_ascii=False, indent=4)

    logger.info(f"书籍数据已保存至 data/douban_books.json，共 {len(all_books)} 本书")
    print(f"书籍数据已保存至 data/douban_books.json，共 {len(all_books)} 本书")

    # Tally how many books fall under each category
    category_stats = {}
    for book in all_books:
        if 'categories' in book and book['categories']:
            for category in book['categories']:
                category_stats[category] = category_stats.get(category, 0) + 1

    # Convert to a list of {name, value} sorted by count, descending
    category_data = sorted(
        ({'name': cat, 'value': count} for cat, count in category_stats.items()),
        key=lambda entry: entry['value'],
        reverse=True,
    )

    with open('data/category_stats.json', 'w', encoding='utf-8') as f:
        json.dump(category_data, f, ensure_ascii=False, indent=4)

    logger.info(f"分类统计数据已保存至 data/category_stats.json，共 {len(category_data)} 个分类")
    print(f"分类统计数据已保存至 data/category_stats.json，共 {len(category_data)} 个分类")

    # Tally books per author; one book may credit several authors via '/'
    author_stats = {}
    for book in all_books:
        if 'author' in book and book['author']:
            for raw_name in book['author'].split('/'):
                name = raw_name.strip()
                if name:
                    author_stats[name] = author_stats.get(name, 0) + 1

    author_data = sorted(
        ({'name': auth, 'value': count} for auth, count in author_stats.items()),
        key=lambda entry: entry['value'],
        reverse=True,
    )

    with open('data/author_stats.json', 'w', encoding='utf-8') as f:
        json.dump(author_data, f, ensure_ascii=False, indent=4)

    logger.info(f"作者统计数据已保存至 data/author_stats.json，共 {len(author_data)} 个作者")
    print(f"作者统计数据已保存至 data/author_stats.json，共 {len(author_data)} 个作者")

# Build category statistics from all_books
def generate_category_stats():
    """Count books per category and write data/category_stats.json."""
    logger.info("生成分类统计数据...")
    print("生成分类统计数据...")

    # Tally the number of books in each category
    counts = {}
    for book in all_books:
        if 'categories' in book:
            for category in book['categories']:
                counts[category] = counts.get(category, 0) + 1

    # {name, value} entries sorted by book count, descending
    category_stats = sorted(
        ({'name': category, 'value': count} for category, count in counts.items()),
        key=lambda entry: entry['value'],
        reverse=True,
    )

    try:
        with open('data/category_stats.json', 'w', encoding='utf-8') as f:
            json.dump(category_stats, f, ensure_ascii=False, indent=2)
        logger.info(f"共统计 {len(category_stats)} 个分类，数据已保存到 data/category_stats.json")
        print(f"共统计 {len(category_stats)} 个分类")
    except Exception as e:
        logger.error(f"保存分类统计数据时出错: {e}")
        print(f"保存分类统计数据时出错: {e}")

# Build author statistics from all_books
def generate_author_stats():
    """Count books per author and write data/author_stats.json."""
    logger.info("生成作者统计数据...")
    print("生成作者统计数据...")

    # Tally books per author; one book may credit several authors via '/'
    counts = {}
    for book in all_books:
        if 'author' in book:
            for raw_name in book['author'].split('/'):
                name = raw_name.strip()
                # Skip empty names and the "等" ("et al.") placeholder
                if name and name != '等':
                    counts[name] = counts.get(name, 0) + 1

    # {name, value} entries sorted by book count, descending
    author_stats = sorted(
        ({'name': author, 'value': count} for author, count in counts.items()),
        key=lambda entry: entry['value'],
        reverse=True,
    )

    try:
        with open('data/author_stats.json', 'w', encoding='utf-8') as f:
            json.dump(author_stats, f, ensure_ascii=False, indent=2)
        logger.info(f"共统计 {len(author_stats)} 位作者，数据已保存到 data/author_stats.json")
        print(f"共统计 {len(author_stats)} 位作者")
    except Exception as e:
        logger.error(f"保存作者统计数据时出错: {e}")
        print(f"保存作者统计数据时出错: {e}")

# Load data directly from a saved HTML file (fallback when live crawling fails)
def load_from_html_file(html_file_path):
    """Parse a saved Top250 list page and append its books to all_books.

    Uses the same selectors and field extraction as crawl_book_list_pages,
    but reads from disk and assigns a fixed default category list.

    Args:
        html_file_path: path to a saved list-page HTML file (UTF-8).

    Returns:
        True if the file was parsed (even with zero items), False on error.
    """
    logger.info(f"从HTML文件加载数据: {html_file_path}")
    print(f"从HTML文件加载数据: {html_file_path}")
    
    try:
        with open(html_file_path, 'r', encoding='utf-8') as f:
            html_content = f.read()
        
        soup = BeautifulSoup(html_content, 'html.parser')
        
        # Locate the book list items
        book_items = soup.select('div.item')
        logger.info(f"从HTML文件中找到 {len(book_items)} 本书")
        print(f"从HTML文件中找到 {len(book_items)} 本书")
        
        for item in book_items:
            try:
                book_info = {}
                
                # Detail-page URL
                link_element = item.select_one('div.pic a')
                if link_element:
                    book_info['url'] = link_element['href']
                
                # Cover image URL
                img_element = item.select_one('div.pic img')
                if img_element and 'src' in img_element.attrs:
                    book_info['img_url'] = img_element['src']
                
                # Title, with all whitespace removed
                title_element = item.select_one('div.pl2 a')
                if title_element:
                    book_info['title'] = title_element.text.strip().replace('\n', '').replace(' ', '')
                    # Drop any subtitle after an ASCII ':'
                    if ':' in book_info['title']:
                        book_info['title'] = book_info['title'].split(':')[0]
                
                # Rating
                rating_element = item.select_one('span.rating_nums')
                if rating_element:
                    book_info['rating'] = float(rating_element.text.strip())
                
                # Number of reviews
                review_count_element = item.select_one('span.pl')
                if review_count_element:
                    review_count_text = review_count_element.text.strip()
                    match = re.search(r'(\d+)人评价', review_count_text)
                    if match:
                        book_info['review_count'] = int(match.group(1))
                
                # One-line quote
                quote_element = item.select_one('p.quote span.inq')
                if quote_element:
                    book_info['quote'] = quote_element.text.strip()
                
                # Basic info line: "author / publisher / year / price"
                info_element = item.select_one('p.pl')
                if info_element:
                    book_info['info'] = info_element.text.strip()
                    
                    # Split out author, publisher, publication year and price
                    info_parts = book_info['info'].split('/')
                    if len(info_parts) >= 4:
                        book_info['author'] = '/'.join(info_parts[:-3]).strip()
                        book_info['publisher'] = info_parts[-3].strip()
                        book_info['publish_year'] = info_parts[-2].strip()
                        book_info['price'] = info_parts[-1].strip()
                
                # Assign a default category list (real tags live on detail pages)
                book_info['categories'] = ['文学', '小说', '经典']  # 默认分类，你可以根据书籍名称或其他信息进行简单分类
                
                all_books.append(book_info)
                
            except Exception as e:
                logger.error(f"解析HTML文件中的书籍项时出错: {e}")
                continue
        
        return True
    
    except Exception as e:
        logger.error(f"从HTML文件加载数据失败: {e}")
        print(f"从HTML文件加载数据失败: {e}")
        return False

# Generate sample data (last resort, when every other data source fails)
def generate_sample_books():
    """Populate all_books with 10 hard-coded well-known titles.

    Replaces the module-level all_books list entirely, persists it via
    save_books_data(), and returns True. Intended only as a demo/fallback
    dataset so the downstream visualisation has something to show.
    """
    logger.info("生成示例数据...")
    print("生成示例数据...")
    
    sample_books = [
        {
            "title": "活着",
            "author": "余华",
            "rating": 9.4,
            "review_count": 30000,
            "publisher": "作家出版社",
            "publish_year": "2012",
            "price": "20.00元",
            "categories": ["文学", "小说", "中国文学", "生活", "经典", "人性", "当代文学", "中国"]
        },
        {
            "title": "百年孤独",
            "author": "加西亚·马尔克斯",
            "rating": 9.3,
            "review_count": 25000,
            "publisher": "南海出版公司",
            "publish_year": "2011",
            "price": "39.50元",
            "categories": ["文学", "小说", "经典", "外国文学", "魔幻现实主义", "马尔克斯", "拉丁美洲文学", "名著"]
        },
        {
            "title": "1984",
            "author": "乔治·奥威尔",
            "rating": 9.4,
            "review_count": 20000,
            "publisher": "北京十月文艺出版社",
            "publish_year": "2010",
            "price": "28.00元",
            "categories": ["小说", "经典", "反乌托邦", "政治", "英国", "乔治·奥威尔", "文学", "奥威尔"]
        },
        {
            "title": "三体",
            "author": "刘慈欣",
            "rating": 9.3,
            "review_count": 35000,
            "publisher": "重庆出版社",
            "publish_year": "2008",
            "price": "23.00元",
            "categories": ["科幻", "中国科幻", "科幻小说", "小说", "刘慈欣", "硬科幻", "经典", "三体"]
        },
        {
            "title": "红楼梦",
            "author": "曹雪芹",
            "rating": 9.6,
            "review_count": 28000,
            "publisher": "人民文学出版社",
            "publish_year": "1996",
            "price": "59.70元",
            "categories": ["古典文学", "经典", "名著", "红楼梦", "中国文学", "文学", "小说", "古典"]
        },
        {
            "title": "平凡的世界",
            "author": "路遥",
            "rating": 9.2,
            "review_count": 23000,
            "publisher": "北京十月文艺出版社",
            "publish_year": "2012",
            "price": "68.00元",
            "categories": ["小说", "路遥", "中国文学", "文学", "经典", "当代文学", "中国", "现实主义"]
        },
        {
            "title": "解忧杂货店",
            "author": "东野圭吾",
            "rating": 8.8,
            "review_count": 18000,
            "publisher": "南海出版公司",
            "publish_year": "2014",
            "price": "39.50元",
            "categories": ["治愈", "小说", "日本文学", "东野圭吾", "温暖", "感动", "悬疑", "文学"]
        },
        {
            "title": "白夜行",
            "author": "东野圭吾",
            "rating": 9.2,
            "review_count": 22000,
            "publisher": "南海出版公司",
            "publish_year": "2013",
            "price": "39.50元",
            "categories": ["东野圭吾", "推理", "日本", "日本文学", "小说", "悬疑", "文学", "经典"]
        },
        {
            "title": "围城",
            "author": "钱锺书",
            "rating": 9.0,
            "review_count": 17000,
            "publisher": "人民文学出版社",
            "publish_year": "1991",
            "price": "19.00元",
            "categories": ["钱锺书", "小说", "中国文学", "经典", "围城", "文学", "讽刺", "钱钟书"]
        },
        {
            "title": "局外人",
            "author": "阿尔贝·加缪",
            "rating": 9.0,
            "review_count": 16000,
            "publisher": "上海译文出版社",
            "publish_year": "2010",
            "price": "22.00元",
            "categories": ["加缪", "外国文学", "小说", "哲学", "经典", "存在主义", "法国", "文学"]
        }
    ]
    
    # Install the sample data as the global book list (replaces, not extends)
    global all_books
    all_books = sample_books
    
    # Persist the sample data immediately
    save_books_data()
    
    return True

# Extract data from previously saved debug HTML files
def extract_data_from_debug_files():
    """Rebuild all_books from debug_page_structure_*.html dumps on disk.

    Clears all_books first, then parses each dump. Note this uses the
    'tr.item' selector (table layout), unlike crawl_book_list_pages which
    uses 'div.item'. Covers are downloaded and a default category list is
    assigned to every book.

    Returns:
        True when at least one book was extracted, False otherwise.
    """
    logger.info("从调试HTML文件中提取数据...")
    print("从调试HTML文件中提取数据...")
    
    # Drop any previously accumulated books
    all_books.clear()
    
    # Find all debug HTML dumps in the working directory
    debug_files = [f for f in os.listdir() if f.startswith('debug_page_structure_') and f.endswith('.html')]
    if not debug_files:
        logger.error("没有找到调试HTML文件")
        print("没有找到调试HTML文件")
        return False
    
    logger.info(f"找到 {len(debug_files)} 个调试HTML文件")
    print(f"找到 {len(debug_files)} 个调试HTML文件")
    
    # Process each debug file
    for debug_file in tqdm(debug_files):
        try:
            # Read the saved HTML
            with open(debug_file, 'r', encoding='utf-8') as f:
                html_content = f.read()
            
            # Parse it
            soup = BeautifulSoup(html_content, 'html.parser')
            
            # Find book items (table-row layout in these dumps)
            book_items = soup.select('tr.item')
            logger.info(f"在文件 {debug_file} 中找到 {len(book_items)} 个书籍项")
            
            for item in book_items:
                try:
                    book_info = {}
                    
                    # Detail-page URL and cover image
                    img_link = item.select_one('a.nbg')
                    if img_link:
                        book_info['url'] = img_link['href']
                        img = img_link.select_one('img')
                        if img and 'src' in img.attrs:
                            book_info['img_url'] = img['src']
                    
                    # Title, with all whitespace removed
                    title_element = item.select_one('div.pl2 a')
                    if title_element:
                        book_info['title'] = title_element.text.strip().replace('\n', '').replace(' ', '')
                        logger.debug(f"解析到书籍标题: {book_info['title']}")
                    
                    # Rating
                    rating_element = item.select_one('span.rating_nums')
                    if rating_element:
                        try:
                            book_info['rating'] = float(rating_element.text.strip())
                        except:
                            logger.warning(f"无法转换评分: {rating_element.text.strip()}")
                    
                    # Number of reviews
                    review_count_text = item.select_one('span.pl')
                    if review_count_text:
                        match = re.search(r'(\d+)人评价', review_count_text.text)
                        if match:
                            book_info['review_count'] = int(match.group(1))
                    
                    # One-line quote
                    quote_element = item.select_one('span.inq')
                    if quote_element:
                        book_info['quote'] = quote_element.text.strip()
                    
                    # Basic info line: "author / publisher / year / price"
                    info_element = item.select_one('p.pl')
                    if info_element:
                        book_info['info'] = info_element.text.strip()
                        
                        # Split out author, publisher, publication year and price
                        info_parts = book_info['info'].split('/')
                        if len(info_parts) >= 4:
                            book_info['author'] = '/'.join(info_parts[:-3]).strip()
                            book_info['publisher'] = info_parts[-3].strip()
                            book_info['publish_year'] = info_parts[-2].strip()
                            book_info['price'] = info_parts[-1].strip()
                    
                    # Assign a default category list (real tags live on detail pages)
                    book_info['categories'] = ['文学', '小说', '经典']  # 默认分类，你可以根据书籍名称或其他信息进行简单分类
                    
                    # Download the cover and record its local path
                    if 'img_url' in book_info and 'title' in book_info:
                        local_img_path = download_image(book_info['img_url'], book_info['title'])
                        if local_img_path:
                            book_info['local_img_path'] = local_img_path
                    
                    all_books.append(book_info)
                    
                except Exception as e:
                    logger.error(f"解析HTML文件中的书籍项时出错: {e}")
                    continue
        
        except Exception as e:
            logger.error(f"处理调试文件 {debug_file} 时出错: {e}")
            continue
    
    logger.info(f"从调试文件中提取了 {len(all_books)} 本书籍信息")
    print(f"从调试文件中提取了 {len(all_books)} 本书籍信息")
    
    if len(all_books) == 0:
        logger.warning("没有从调试文件中提取到任何书籍信息")
        print("没有从调试文件中提取到任何书籍信息")
        return False
    
    return True

# Program entry point
def main():
    """Run the pipeline: reuse debug dumps, else crawl, else sample data; save."""
    logger.info("程序开始运行...")
    print("豆瓣图书爬虫启动...")

    try:
        if extract_data_from_debug_files():
            # Offline debug dumps were sufficient
            logger.info("从调试文件中提取数据成功")
            print("从调试文件中提取数据成功")
        else:
            logger.warning("从调试文件中提取数据失败，尝试直接爬取")
            print("从调试文件中提取数据失败，尝试直接爬取")

            # Fall back to live crawling of the list pages
            crawl_book_list_pages()
            # crawl_book_detail_pages()  # 详情页爬取可能会导致IP被封，暂时禁用

        # Last resort: fabricate sample data so downstream consumers have input
        if not all_books:
            logger.warning("未获取到任何书籍数据，生成示例数据")
            print("未获取到任何书籍数据，生成示例数据")
            generate_sample_books()

        # Persist whatever we ended up with
        save_books_data()

        logger.info("程序运行完成")
        print("豆瓣图书爬虫运行完成")

    except Exception as e:
        logger.error(f"程序运行出错: {e}")
        print(f"程序运行出错: {e}")

# Run the spider only when this file is executed directly as a script
if __name__ == "__main__":
    main()
    