import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
import logging

# Root-logger configuration: timestamped, INFO-level messages.
LOG_FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
LOG_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT, datefmt=LOG_DATE_FORMAT)

# Module-level logger used throughout this file.
logger = logging.getLogger(__name__)

class DangDangScraper:
    """Scraper for the Dangdang.com weekly bestseller book ranking.

    Fetches paginated ranking pages, extracts per-book fields with
    BeautifulSoup, accumulates them in ``self.all_books`` and can export
    the result to CSV or log a short summary.
    """

    def __init__(self):
        # The 1-based page number is appended directly to this URL
        # (Dangdang encodes pagination as a trailing "-<n>" segment).
        self.base_url = "http://bang.dangdang.com/books/bestsellers/01.00.00.00.00.00-recent7-0-0-1-"
        # Browser-like User-Agent so the site serves the regular HTML page.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }
        # list[dict]: one dict of extracted fields per scraped book.
        self.all_books = []

    def get_page_content(self, page_num):
        """Fetch the HTML of ranking page *page_num*.

        Returns:
            The decoded page text, or ``None`` on any request failure
            (network error, timeout, or non-2xx HTTP status).
        """
        url = f"{self.base_url}{page_num}"
        logger.info(f"正在访问: {url}")

        try:
            # Explicit timeout so a stalled connection cannot hang the
            # whole scrape indefinitely (requests has no default timeout).
            response = requests.get(url, headers=self.headers, timeout=10)
            response.encoding = 'gbk'  # Dangdang serves GBK-encoded pages
            response.raise_for_status()  # raise on HTTP 4xx/5xx
            return response.text
        except Exception as e:
            logger.error(f"获取页面失败: {e}")
            return None

    def parse_book_info(self, html_content):
        """Parse one ranking page and append each book's fields to ``self.all_books``.

        A ``None``/empty *html_content* (failed fetch) is silently skipped.
        Books missing the rank node raise inside the per-book ``try`` and
        are logged and skipped rather than aborting the whole page.
        """
        if not html_content:
            return

        soup = BeautifulSoup(html_content, 'html.parser')
        book_list = soup.select('.bang_list li')

        for book in book_list:
            try:
                # Rank: no fallback on purpose — an <li> without .list_num
                # is not a book entry and is skipped via the except below.
                rank = book.select_one('.list_num').text.strip()

                # Title
                title = book.select_one('.name a')
                title = title.text.strip() if title else "无书名"

                # Author: first link inside the publisher-info area.
                author = book.select_one('.publisher_info a')
                author = author.text.strip() if author else "无作者"

                # Publisher: last link inside the publisher-info area.
                publisher = book.select('.publisher_info a')[-1] if book.select('.publisher_info a') else None
                publisher = publisher.text.strip() if publisher else "无出版社"

                # Current price
                price_info = book.select_one('.price .price_n')
                price = price_info.text.strip() if price_info else "无价格"

                # Original (list) price
                original_price = book.select_one('.price .price_r')
                original_price = original_price.text.strip() if original_price else "无原价"

                # Recommendation percentage ("rating")
                star = book.select_one('.star .tuijian')
                star = star.text.strip() if star else "无评分"

                # Comment count
                comments = book.select_one('.star a')
                comments = comments.text.strip() if comments else "无评论"

                # Keys are intentionally Chinese: they become the CSV header.
                book_info = {
                    "排名": rank,
                    "书名": title,
                    "作者": author,
                    "出版社": publisher,
                    "价格": price,
                    "原价": original_price,
                    "评分": star,
                    "评论数": comments
                }

                self.all_books.append(book_info)
                logger.info(f"已提取: {title} - {author}")

            except Exception as e:
                logger.error(f"解析书籍信息失败: {e}")

    def scrape(self, pages=5):
        """Scrape *pages* consecutive ranking pages (starting at page 1)."""
        logger.info(f"开始抓取当当网畅销书排行榜，共{pages}页")

        for page in range(1, pages + 1):
            html_content = self.get_page_content(page)
            self.parse_book_info(html_content)

            # Random inter-page delay to reduce the chance of being
            # rate-limited; skipped after the final page (nothing follows).
            if page < pages:
                delay = random.uniform(1, 3)
                logger.info(f"第{page}页抓取完成，等待{delay:.2f}秒后继续")
                time.sleep(delay)

        logger.info(f"抓取完成，共获取{len(self.all_books)}本畅销书信息")

    def save_to_csv(self, filename="dangdang_bestsellers.csv"):
        """Write the collected books to *filename* as CSV.

        Uses utf-8-sig so Excel opens the Chinese headers correctly.
        Does nothing (with a warning) when no data was collected.
        """
        if not self.all_books:
            logger.warning("没有数据可保存")
            return

        try:
            df = pd.DataFrame(self.all_books)
            df.to_csv(filename, index=False, encoding='utf-8-sig')
            # Fixed: the message previously contained a literal "(unknown)"
            # instead of the actual output filename.
            logger.info(f"数据已保存到{filename}，共{len(df)}条记录")
        except Exception as e:
            logger.error(f"保存数据失败: {e}")

    def show_summary(self):
        """Log a short summary: total count and the top-10 books."""
        if not self.all_books:
            logger.warning("没有抓取到数据")
            return

        df = pd.DataFrame(self.all_books)
        logger.info("\n抓取数据摘要:")
        logger.info(f"总记录数: {len(df)}")
        logger.info("\n前10名畅销书:")
        for i, (_, book) in enumerate(df.head(10).iterrows(), 1):
            logger.info(f"{i}. {book['书名']} - {book['作者']} - {book['价格']}")

# Script entry point: scrape, persist, then summarize.
if __name__ == "__main__":
    scraper = DangDangScraper()  # logging is already configured at import time
    scraper.scrape(pages=5)      # fetch the default five ranking pages
    scraper.save_to_csv()        # write results to the default CSV file
    scraper.show_summary()       # log total count and the top-10 books