import json
import logging
import os
import random
import re
import time
import urllib.parse

import requests
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# Configure logging: INFO level, timestamped records, written both to
# dangdang_spider.log and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('dangdang_spider.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Pool of User-Agent strings; one is picked at random per request so the
# traffic looks less like a single automated client (basic anti-bot evasion).
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/113.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Edge/114.0.1823.51 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 13_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36 Edg/112.0.1722.68',
    'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 13_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Firefox/115.0',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/115.0'
]

def get_random_headers():
    """
    Build a browser-like request-header dict with a randomly chosen User-Agent.

    Returns:
        dict: HTTP headers ready to pass to requests (UA rotated per call).
    """
    # Rotate the UA on every call so consecutive requests look less uniform.
    ua = random.choice(USER_AGENTS)
    return {
        'User-Agent': ua,
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Cache-Control': 'max-age=0',
        'TE': 'Trailers',
    }

# Shared session with an automatic retry strategy mounted for both schemes.
session = requests.Session()
retry_strategy = Retry(
    total=3,  # total retry attempts
    backoff_factor=0.3,  # exponential backoff factor between attempts
    status_forcelist=[429, 500, 502, 503, 504],  # HTTP status codes that trigger a retry
    allowed_methods=["GET"]  # only retry idempotent GET requests
)
adapter = HTTPAdapter(max_retries=retry_strategy)
session.mount("http://", adapter)
session.mount("https://", adapter)

def get_book_info(html_content):
    """
    Extract book records from a dangdang.com search-result HTML page.

    Args:
        html_content: Raw HTML of a search-result page (str).

    Returns:
        list[dict]: one dict per book with keys: title, authors,
        publish_date, publisher, current_price, original_price, discount,
        comments, link, image_url. Items with no recognizable title are
        skipped; items that raise during parsing are logged and skipped.
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    books = []

    # Locate the <li> entries holding book data, from most to least specific.
    book_items = soup.select('li[class*="line"]')
    if not book_items:
        # Fall back to any <li> that contains a .price element.
        book_items = soup.select('li:has(.price)')
    if not book_items:
        # Last resort: every <li>; title-less entries are skipped below.
        book_items = soup.select('li')

    # Hoisted out of the loop: previously `import re` + re.search ran per item.
    discount_pattern = re.compile(r'\d+\.?\d*折')

    for item in book_items:
        try:
            book_info = {}

            # --- Title ---
            name_element = item.select_one('.name a')
            if name_element:
                # Prefer the title attribute: it holds the full, untruncated name.
                if 'title' in name_element.attrs:
                    book_info['title'] = name_element['title'].strip()
                else:
                    book_info['title'] = name_element.text.strip().replace('...', '')
            else:
                # Alternative location for the title.
                name_alt = item.select_one('a[title]')
                if name_alt and 'title' in name_alt.attrs:
                    book_info['title'] = name_alt['title'].strip()
                else:
                    continue  # no title found — not a book entry, skip

            # --- Authors (deduplicated, order preserved) ---
            book_info['authors'] = []
            author_elements = item.select('.publisher_info a')
            if author_elements:
                authors = []
                for author in author_elements:
                    author_name = author.text.strip()
                    if author_name and author_name not in authors:
                        authors.append(author_name)
                book_info['authors'] = authors

            # --- Publish date and publisher (second .publisher_info div) ---
            publisher_info_elements = item.select('.publisher_info')
            if len(publisher_info_elements) >= 2:
                date_text = ''
                if publisher_info_elements[1].contents:
                    # The date usually sits in a <span>.
                    date_span = publisher_info_elements[1].select_one('span')
                    if date_span:
                        date_text = date_span.text.strip()
                    else:
                        # No <span>: take the first text node that looks like a date
                        # (starts with four digits, e.g. "2024-08-01").
                        for content in publisher_info_elements[1].contents:
                            if isinstance(content, str) and content.strip():
                                date_text = content.strip()
                                if len(date_text) >= 4 and date_text[0:4].isdigit():
                                    break
                book_info['publish_date'] = date_text if date_text else '未知'

                # The publisher is normally a link inside the same div.
                publisher_element = publisher_info_elements[1].select_one('a')
                if publisher_element:
                    book_info['publisher'] = publisher_element.text.strip()
                else:
                    # No link: use the first text node that is not a date.
                    publisher_text = ''
                    for content in publisher_info_elements[1].contents:
                        if isinstance(content, str):
                            text = content.strip()
                            if text and not (len(text) >= 4 and text[0:4].isdigit()):
                                publisher_text = text
                                break
                    book_info['publisher'] = publisher_text if publisher_text else '未知'
            else:
                book_info['publish_date'] = '未知'
                book_info['publisher'] = '未知'

            # --- Prices (primary selector, then alternate class names) ---
            current_price_element = item.select_one('.price_n')
            if not current_price_element:
                current_price_element = item.select_one('.price .search_now_price')
            book_info['current_price'] = current_price_element.text.strip() if current_price_element else '未知'

            original_price_element = item.select_one('.price_r')
            if not original_price_element:
                original_price_element = item.select_one('.price .search_pre_price')
            book_info['original_price'] = original_price_element.text.strip() if original_price_element else '未知'

            # --- Discount ---
            discount_element = item.select_one('.price_s')
            if discount_element:
                book_info['discount'] = discount_element.text.strip()
            else:
                # Fall back to scanning the price text for an "N折" pattern.
                price_text = item.select_one('.price').text.strip() if item.select_one('.price') else ''
                if '折' in price_text:
                    discount_match = discount_pattern.search(price_text)
                    book_info['discount'] = discount_match.group() if discount_match else '未知'
                else:
                    book_info['discount'] = '未知'

            # --- Comment count ---
            comment_element = item.select_one('.star a')
            if comment_element:
                book_info['comments'] = comment_element.text.strip()
            else:
                comment_alt = item.select_one('.search_comment_num')
                book_info['comments'] = comment_alt.text.strip() if comment_alt else '0条评论'

            # --- Product link ---
            book_link_element = item.select_one('.name a') or item.select_one('a[href*="product.dangdang.com"]')
            if book_link_element and 'href' in book_link_element.attrs:
                book_info['link'] = book_link_element['href'].strip()
            else:
                book_info['link'] = '未知'

            # --- Cover image (src first, then the lazy-load attribute) ---
            image_element = item.select_one('.pic img')
            if image_element and 'src' in image_element.attrs:
                book_info['image_url'] = image_element['src'].strip()
            elif image_element and 'data-original' in image_element.attrs:
                book_info['image_url'] = image_element['data-original'].strip()
            else:
                book_info['image_url'] = '未知'

            books.append(book_info)

        except Exception as e:
            # Log instead of print (was print) so parse failures also reach
            # the log file configured at module level.
            logger.error(f"解析书籍信息时出错: {e}")
            continue

    return books

def search_dangdang(keyword, page=1):
    """
    Search dangdang.com for books matching *keyword* and parse one result page.

    Args:
        keyword: Search term (URL-encoded before the request).
        page: 1-based result-page index (default 1).

    Returns:
        list[dict]: parsed book records (see get_book_info), or an empty
        list after the retry budget is exhausted or an unknown error occurs.
    """
    encoded_keyword = urllib.parse.quote(keyword)
    search_url = f"http://search.dangdang.com/?key={encoded_keyword}&page_index={page}"

    logger.info(f"开始搜索关键词: {keyword}，页码: {page}，URL: {search_url}")

    retry_count = 0
    max_retries = 3

    while retry_count < max_retries:
        try:
            # Fresh random headers per attempt.
            headers = get_random_headers()
            logger.debug(f"使用请求头: {headers['User-Agent']}")

            response = session.get(
                search_url,
                headers=headers,
                timeout=(10, 30),  # (connect timeout, read timeout)
                allow_redirects=True
            )

            # Raise HTTPError for 4xx/5xx responses.
            response.raise_for_status()

            # Let requests guess the page encoding (dangdang pages are GBK-ish).
            response.encoding = response.apparent_encoding

            logger.info(f"成功获取第{page}页搜索结果，状态码: {response.status_code}")

            # Random delay to avoid hammering the server.
            time.sleep(random.uniform(1, 3))

            return get_book_info(response.text)

        except requests.exceptions.HTTPError as e:
            retry_count += 1
            logger.error(f"HTTP错误: {e}")

            # Read the status from the exception's own response instead of the
            # local `response` name — robust even if the name were never bound.
            status = e.response.status_code if e.response is not None else None
            if retry_count < max_retries:
                # 429 (too many requests) gets a much longer back-off.
                if status == 429:
                    wait_time = random.uniform(10, 20)
                    logger.warning(f"请求过于频繁，等待{wait_time:.2f}秒后重试 ({retry_count}/{max_retries})")
                else:
                    wait_time = random.uniform(2, 5)
                    logger.warning(f"将在{wait_time:.2f}秒后重试 ({retry_count}/{max_retries})")
                time.sleep(wait_time)

        except requests.exceptions.ConnectionError as e:
            retry_count += 1
            logger.error(f"连接错误: {e}")
            if retry_count < max_retries:
                wait_time = random.uniform(3, 8)
                logger.warning(f"网络连接失败，将在{wait_time:.2f}秒后重试 ({retry_count}/{max_retries})")
                time.sleep(wait_time)

        except requests.exceptions.Timeout as e:
            retry_count += 1
            logger.error(f"超时错误: {e}")
            if retry_count < max_retries:
                wait_time = random.uniform(5, 10)
                logger.warning(f"请求超时，将在{wait_time:.2f}秒后重试 ({retry_count}/{max_retries})")
                time.sleep(wait_time)

        except Exception as e:
            # Unknown failure: log with traceback and give up immediately.
            logger.error(f"搜索时发生未知错误: {e}", exc_info=True)
            break

    logger.error(f"搜索失败，已达到最大重试次数 ({max_retries})")
    return []

def save_books_to_json(books, filename='dangdang_books.json'):
    """
    Save book records to a UTF-8 JSON file.

    Args:
        books: List of book dicts (JSON-serializable).
        filename: Target path (default 'dangdang_books.json').

    Returns:
        bool: True on success, False if writing failed (error is logged).
    """
    try:
        # Ensure the target directory exists (dirname of a bare filename
        # resolves to the current working directory via abspath).
        os.makedirs(os.path.dirname(os.path.abspath(filename)), exist_ok=True)

        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(books, f, ensure_ascii=False, indent=2)
        # Fix: report the actual filename instead of the literal "(unknown)".
        logger.info(f"结果已保存到 {filename} 文件中")
        print(f"\n结果已保存到 {filename} 文件中")
        return True
    except IOError as e:
        logger.error(f"IO错误，无法保存文件: {e}")
        print(f"保存文件时出错: {e}")
        return False
    except Exception as e:
        logger.error(f"保存文件时发生未知错误: {e}", exc_info=True)
        print(f"保存文件时出错: {e}")
        return False

def print_book_info(books):
    """
    Print a summary of parsed books to stdout and mirror it to the logger.

    At most the first five books are shown in full; a trailing line reports
    how many more were parsed but not displayed.
    """
    total = len(books)
    logger.info(f"成功解析到 {total} 本书籍信息")
    print(f"成功解析到 {total} 本书籍信息：")

    if not books:
        logger.warning("没有找到任何书籍信息")
        print("没有找到任何书籍信息")
        return

    # Cap console output at five entries.
    display_count = min(5, total)
    for i, book in enumerate(books[:display_count], 1):
        try:
            fields = [
                f"书籍 {i}：",
                f"书名: {book.get('title', '未知')}",
                f"作者: {', '.join(book.get('authors', []))}",
                f"出版社: {book.get('publisher', '未知')}",
                f"出版日期: {book.get('publish_date', '未知')}",
                f"当前价格: {book.get('current_price', '未知')}",
                f"原价: {book.get('original_price', '未知')}",
                f"折扣: {book.get('discount', '未知')}",
                f"评论数: {book.get('comments', '0条评论')}",
                f"链接: {book.get('link', '未知')}",
            ]
            summary = "\n" + "\n".join(fields)
            print(summary)
            logger.debug(summary)
        except Exception as e:
            logger.error(f"打印书籍 {i} 信息时出错: {e}")

    if total > display_count:
        msg = f"\n... 还有 {total - display_count} 本书籍信息未显示"
        print(msg)
        logger.info(msg)

def main():
    """
    主函数，支持测试HTML解析和实际搜索功能
    """
    print("===== 当当网图书爬虫 =====")
    print("1. 使用测试HTML片段解析")
    print("2. 从当当网搜索实际图书信息")
    
    # 这里我们使用用户提供的HTML片段进行测试
    test_html = '''<li class=""> 
     <div class="list_num red">1.</div>   
     <div class="pic"><a href="http://product.dangdang.com/29771967.html" target="_blank"><img src="http://img3m7.ddimg.cn/93/28/29771967-1_l_1749636776.jpg" alt="红岩 八年级上册阅读名著正版原著罗广斌杨益言著爱国主义红色经典书籍初中生课外书中国青年出版社" title="红岩 八年级上册阅读名著正版原著罗广斌杨益言著爱国主义红色经典书籍初中生课外书中国青年出版社"></a></div>    
     <div class="name"><a href="http://product.dangdang.com/29771967.html" target="_blank" title="红岩 八年级上册阅读名著正版原著罗广斌杨益言著爱国主义红色经典书籍初中生课外书中国青年出版社">红岩 八年级上册阅读名著正版原著罗广斌杨益言著爱国主义红色经典<span class="dot">...</span></a></div>    
     <div class="star"><span class="level"><span style="width: 88.2%;"></span></span><a href="http://product.dangdang.com/29771967.html?point=comment_point" target="_blank">267489条评论</a><span class="tuijian">100%推荐</span></div>    
     <div class="publisher_info"><a href="http://search.dangdang.com/?key=罗广斌" title="罗广斌,杨益言著" target="_blank">罗广斌</a>,<a href="http://search.dangdang.com/?key=杨益言" title="罗广斌,杨益言著" target="_blank">杨益言</a>著</div>    
     <div class="publisher_info"><span>2024-08-01</span>&nbsp;<a href="http://search.dangdang.com/?key=中国青年出版社" target="_blank">中国青年出版社</a></div>    
                            
     <div class="price">        
         <p> 
             <span class="price_n">¥38.40</span> 
                         <span class="price_r">¥48.00</span> 
             (<span class="price_s">8.0折</span>) 
                     </p> 
                     <p class="price_e"></p> 
                 <div class="buy_button"> 
                           <a ddname="加入购物车" name="" href="javascript:AddToShoppingCart('29771967');" class="listbtn_buy">加入购物车</a> 
                          
                
             <a ddname="加入收藏" id="addto_favorlist_29771967" name="" href="javascript:showMsgBox('addto_favorlist_29771967',encodeURIComponent('29771967&amp;platform=3'), 'http://myhome.dangdang.com/addFavoritepop');" class="listbtn_collect">收藏</a> 
         </div> 
         
     </div> 
   
     </li>'''
    
    # 默认使用测试HTML进行演示
    print("\n[默认使用测试HTML进行演示...]")
    try:
        # 解析测试HTML
        books = get_book_info(test_html)
        
        # 打印结果
        print_book_info(books)
        
        # 保存结果到JSON文件
        save_books_to_json(books, 'test_books.json')
        
        # 询问是否要进行实际搜索
        print("\n是否要搜索实际的图书信息？(y/n)")
        # 由于无法直接获取用户输入，这里默认不进行实际搜索
        # 如果需要实际搜索，可以取消下面的注释
        """
        choice = input().strip().lower()
        if choice == 'y':
            keyword = input("请输入搜索关键词: ").strip()
            page = int(input("请输入页码(默认为1): ") or "1")
            
            print(f"\n正在搜索关键词: {keyword}，页码: {page}...")
            books = search_dangdang(keyword, page)
            
            if books:
                print_book_info(books)
                save_books_to_json(books)
            else:
                print("未找到相关书籍信息")
        """
        
        # 演示实际搜索功能（使用固定关键词）
        print("\n[演示实际搜索功能...]")
        print("正在搜索关键词: 红岩，页码: 1...")
        books = search_dangdang("红岩", 1)
        
        if books:
            print_book_info(books)
            save_books_to_json(books)
        else:
            print("未找到相关书籍信息或搜索失败")
            
    except Exception as e:
        print(f"程序运行出错: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    # 添加延迟，避免过快请求
    print("程序启动中...")
    time.sleep(1)
    main()