import requests
from bs4 import BeautifulSoup
import csv
import time
import random
from concurrent.futures import ThreadPoolExecutor
import logging

# Logging configuration: INFO-level, timestamped records written to a file.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    filename='dangdang_crawler.log'
)

# Fixed User-Agent pool, used instead of the fake-useragent package.
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Safari/605.1.15',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36 Edg/91.0.864.59'
]

# Global crawler configuration.
CONFIG = {
    'max_workers': 5,  # thread-pool size (keep small to avoid being blocked)
    'timeout': 15,  # per-request timeout in seconds
    'retry_times': 3,  # retry attempts per page
    'max_pages': 500,  # total page budget across all keywords (~20 items/page)
    'output_file': 'dangdang_data.csv',
    'keywords': ['Python', 'Java', '机器学习', '编程', '数据库', '算法'],  # multiple keywords to spread the crawl
    'proxies': None,  # optional proxy pool; passed straight to requests
    'min_delay': 1,  # minimum inter-request delay (seconds)
    'max_delay': 3,  # maximum inter-request delay (seconds)
    'long_delay': 10  # extended back-off delay (seconds) after CAPTCHA/errors
}


def get_random_headers():
    """Build request headers with a randomly chosen User-Agent.

    Returns:
        dict: headers suitable for passing to ``requests.get``.
    """
    headers = {
        'Referer': 'http://www.dangdang.com/',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive',
    }
    # Rotate the UA on every call to look less like a single client.
    headers['User-Agent'] = random.choice(USER_AGENTS)
    return headers


def get_page_data(keyword, page):
    """Fetch and parse one Dangdang search-result page.

    Args:
        keyword: Search term sent to the search endpoint.
        page: 1-based page index.

    Returns:
        list[dict]: parsed book records from ``parse_list_page``, or an
        empty list when every retry attempt fails.
    """
    url = 'http://search.dangdang.com/'
    params = {
        'key': keyword,
        'page_index': page,
        'sort_type': 'sort_sale_amt_desc'  # order results by sales volume
    }

    for attempt in range(CONFIG['retry_times']):
        try:
            response = requests.get(
                url,
                params=params,
                headers=get_random_headers(),
                proxies=CONFIG['proxies'],
                timeout=CONFIG['timeout']
            )
            # BUG FIX: the page is GBK-encoded; gb2312 is a proper subset
            # of GBK and garbles characters outside it (rare hanzi, some
            # punctuation). GBK decodes everything gb2312 can and more.
            response.encoding = 'gbk'

            # Check the HTTP status before inspecting the body, so error
            # pages are not scanned for CAPTCHA markers first.
            if response.status_code != 200:
                logging.warning(f"状态码异常: {response.status_code}")
                time.sleep(CONFIG['long_delay'])
                continue

            if "验证码" in response.text:
                logging.warning(f"触发验证码: {keyword} 第{page}页 尝试{attempt + 1}")
                time.sleep(CONFIG['long_delay'])  # back off longer on CAPTCHA
                continue

            return parse_list_page(response.text, keyword)

        except requests.exceptions.RequestException as e:
            logging.error(f"获取页面失败({attempt + 1}): {e}")
            time.sleep(random.uniform(CONFIG['min_delay'], CONFIG['max_delay']))

    # All retries exhausted.
    return []


def parse_list_page(html, keyword):
    """Extract book records from a search-result page.

    Args:
        html: Decoded HTML of the list page.
        keyword: Search keyword, stored alongside each record.

    Returns:
        list[dict]: one dict per book; empty when the product list
        container is absent.
    """
    soup = BeautifulSoup(html, 'html.parser')
    container = soup.find('ul', {'class': 'bigimg'})
    if not container:
        return []

    def tag_text(tag, fallback):
        # Stripped text of *tag*, or *fallback* when the tag is missing/empty.
        return tag.get_text(strip=True) if tag else fallback

    records = []
    for entry in container.find_all('li')[:20]:  # at most 20 items per page
        try:
            title_anchor = entry.find('a', {'name': 'itemlist-title'})
            if not title_anchor:
                continue

            # Prefer the title attribute; fall back to the anchor text.
            title = title_anchor.get('title', '').strip() or title_anchor.get_text(strip=True)

            price = tag_text(entry.find('span', {'class': 'search_now_price'}), '无')
            author = tag_text(entry.find('a', {'name': 'itemlist-author'}), '无')
            publisher = tag_text(entry.find('a', {'name': 'P_cbs'}), '无')
            comments = tag_text(entry.find('a', {'name': 'itemlist-review'}), '0')

            date_block = entry.find('span', {'class': 'search_book_author'})
            pub_date = date_block.find_all('span')[-1].get_text(strip=True) if date_block else '无'

            pic_anchor = entry.find('a', {'name': 'itemlist-picture'})
            link = pic_anchor.get('href', '') if pic_anchor else ''

            records.append({
                '关键词': keyword,
                '书名': title,
                '价格': price.replace('¥', '').strip(),
                '作者': author,
                '出版社': publisher,
                '出版日期': pub_date.replace('/', '').strip(),
                '评论数': comments.replace('条评论', '').strip(),
                '商品链接': link
            })
        except Exception as e:
            # Skip a malformed entry rather than aborting the whole page.
            logging.error(f"解析商品条目失败: {e}")
            continue

    return records


def save_data(data):
    """Append book records to the configured CSV file.

    Writes the header row only when the file is currently empty, so
    repeated calls keep appending under a single header.

    Args:
        data: List of dicts sharing one key set; no-op when empty.
    """
    if not data:
        return

    fieldnames = data[0].keys()
    with open(CONFIG['output_file'], 'a', newline='', encoding='utf-8-sig') as fh:
        writer = csv.DictWriter(fh, fieldnames=fieldnames)
        # In append mode tell() is the file size, so 0 means a fresh file.
        if fh.tell() == 0:
            writer.writeheader()
        writer.writerows(data)


def crawl_task(keyword, start_page, end_page):
    """Crawl pages ``start_page``..``end_page`` (inclusive) for one keyword.

    Each page is fetched, parsed, and appended to the CSV immediately.
    Stops early when a page yields no data (assumed end of results).

    Returns:
        int: total number of records collected by this task.
    """
    collected = 0
    page = start_page
    while page <= end_page:
        try:
            rows = get_page_data(keyword, page)
            if not rows:
                logging.info(f"{keyword} 第{page}页无数据，可能已达末尾")
                break

            save_data(rows)
            collected += len(rows)
            logging.info(f"成功保存 {keyword} 第{page}页 {len(rows)}条数据，总计: {collected}")

            # Random jitter between requests.
            time.sleep(random.uniform(CONFIG['min_delay'], CONFIG['max_delay']))

            # Extra cooldown after every 10th page.
            if page % 10 == 0:
                time.sleep(CONFIG['long_delay'])

        except Exception as e:
            # Log and back off, then move on to the next page.
            logging.error(f"爬取任务出错: {e}")
            time.sleep(CONFIG['long_delay'])

        page += 1

    return collected


def main():
    """Entry point: crawl all configured keywords concurrently.

    The total page budget (CONFIG['max_pages']) is split evenly across the
    keywords; each keyword is crawled from page 1 up to its share, and the
    per-page results are appended to the output CSV by the worker tasks.
    """
    start_time = time.time()
    total_collected = 0

    # Truncate/create the output file so each run starts fresh.
    open(CONFIG['output_file'], 'w', encoding='utf-8-sig').close()

    # Pages per keyword; at least 1 so every keyword gets attempted even
    # when there are more keywords than budgeted pages.
    pages_per_keyword = max(1, CONFIG['max_pages'] // len(CONFIG['keywords']))

    with ThreadPoolExecutor(max_workers=CONFIG['max_workers']) as executor:
        # BUG FIX: pagination on the search endpoint is per keyword, so
        # every task must start at page 1. The previous code offset
        # start_page by the keyword index (i * pages_per_keyword + 1),
        # which skipped the first pages of every keyword after the first.
        futures = [
            executor.submit(crawl_task, keyword, 1, pages_per_keyword)
            for keyword in CONFIG['keywords']
        ]

        for future in futures:
            try:
                total_collected += future.result()
            except Exception as e:
                logging.error(f"任务执行出错: {e}")

    end_time = time.time()
    logging.info(f"爬取完成! 共获取 {total_collected} 条数据，耗时: {(end_time - start_time) / 60:.2f}分钟")


# Run the crawler only when executed as a script, not when imported.
if __name__ == '__main__':
    main()