import requests
from lxml import etree
import os
import logging
from concurrent.futures import ThreadPoolExecutor, as_completed
import random

# Logging configuration: timestamped INFO-level messages.
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Pool of desktop Chrome User-Agent strings; one is chosen at random per
# request so outgoing traffic looks less uniform.
USER_AGENTS = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.131 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.63 Safari/537.36'
]

def fetch_book_list(url):
    """Fetch the chart page at *url* and return full URLs of book detail pages.

    Parameters:
        url: absolute URL of a Douban book chart/list page.

    Returns:
        list[str]: absolute detail-page URLs; empty list if the request
        fails or the response cannot be parsed.
    """
    headers = {
        'User-Agent': random.choice(USER_AGENTS),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
    }

    try:
        response = requests.get(url, headers=headers, timeout=5)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        logging.error(f"请求失败: {e} for URL: {url}")
        return []

    tree = etree.HTML(response.text)
    if tree is None:
        # etree.HTML returns None for empty or unparseable documents.
        logging.error(f"解析失败 for URL: {url}")
        return []

    # Extract detail-page links from the list anchors.
    book_links = tree.xpath('//a[@class="fleft"]/@href')
    base_url = 'https://book.douban.com'
    # Only prefix relative hrefs; absolute links are kept as-is so we never
    # produce malformed "https://book.douban.comhttps://..." URLs.
    return [link if link.startswith('http') else base_url + link
            for link in book_links]

def _xpath_first(tree, query, default="未知"):
    """Return the first stripped result of *query* on *tree*, or *default*."""
    results = tree.xpath(query)
    return results[0].strip() if results else default


def fetch_douban_book_details(url):
    """Fetch a Douban book detail page and extract its metadata.

    Parameters:
        url: absolute URL of a book detail page.

    Returns:
        dict with keys 'title', 'author', 'publisher', 'publication_year',
        'isbn' and 'summary', or None if the request or parse fails.
    """
    headers = {
        'User-Agent': random.choice(USER_AGENTS),
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
        'Accept-Language': 'en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7'
    }

    try:
        response = requests.get(url, headers=headers, timeout=5)
        response.raise_for_status()
    except requests.exceptions.RequestException as e:
        logging.error(f"请求失败: {e} for URL: {url}")
        return None

    tree = etree.HTML(response.text)
    if tree is None:
        # etree.HTML returns None for empty or unparseable documents.
        logging.error(f"解析失败 for URL: {url}")
        return None

    # Title
    title = _xpath_first(tree, '//span[@property="v:itemreviewed"]/text()')

    # Authors: join all author names with a comma.
    author = tree.xpath('//a[@rel="v:author"]/span/text()')
    author = ', '.join(author).strip() if author else "未知"

    # Publisher
    publisher = _xpath_first(
        tree,
        '//span[@class="pl"][contains(text(), "出版社:")]/following-sibling::a/text()')

    # Publication year: drop the "/" separators used in the info block.
    publication_year = tree.xpath(
        '//span[@class="pl"][contains(text(), "出版年:")]/following-sibling::text()')
    publication_year = (publication_year[0].strip().replace('/', '').strip()
                        if publication_year else "未知")

    # ISBN: same cleanup as the publication year.
    isbn = tree.xpath(
        '//span[@class="pl"][contains(text(), "ISBN:")]/following-sibling::text()')
    isbn = isbn[0].strip().replace('/', '').strip() if isbn else "未知"

    # Summary: join intro paragraphs with newlines.
    summary = tree.xpath('//div[@class="intro"]//p/text()')
    summary = '\n'.join(summary).strip() if summary else "无简介"

    # Log the extracted fields.
    logging.info(f"书名: {title}")
    logging.info(f"作者: {author}")
    logging.info(f"出版社: {publisher}")
    logging.info(f"出版年份: {publication_year}")
    logging.info(f"ISBN: {isbn}")
    logging.info(f"简介: {summary}")

    return {
        'title': title,
        'author': author,
        'publisher': publisher,
        'publication_year': publication_year,
        'isbn': isbn,
        'summary': summary
    }

# Maps every character that is illegal in filenames on common platforms
# to an underscore; applied in one pass via str.translate.
_ILLEGAL_FILENAME_CHARS = str.maketrans('/\\:*?"<>|', '_________')


def save_book_details(book_details):
    """Write one book's details to ``<sanitized title>.txt`` in UTF-8.

    Parameters:
        book_details: dict produced by fetch_douban_book_details, or a
            falsy value (e.g. None on fetch failure), in which case
            nothing is written.
    """
    if not book_details:
        return
    # NOTE: str.translate replaces the original chain of nine .replace()
    # calls inside an f-string expression, whose backslash literal is a
    # SyntaxError on Python < 3.12.
    file_name = book_details['title'].translate(_ILLEGAL_FILENAME_CHARS) + '.txt'
    try:
        with open(file_name, 'w', encoding='utf-8') as file:
            file.write(f"书名: {book_details['title']}\n")
            file.write(f"作者: {book_details['author']}\n")
            file.write(f"出版社: {book_details['publisher']}\n")
            file.write(f"出版年份: {book_details['publication_year']}\n")
            file.write(f"ISBN: {book_details['isbn']}\n")
            file.write(f"简介: {book_details['summary']}\n")
        logging.info(f"详细信息已保存到 {file_name}")
    except IOError as e:
        logging.error(f"保存文件失败: {e}")

def main():
    """Scrape the Douban popular-books chart and save each book's details.

    Fetches the list of detail-page URLs, then downloads and parses each
    page concurrently with a thread pool (I/O-bound work).
    """
    list_url = 'https://book.douban.com/chart?subcat=all&icn=index-topchart-popular'
    book_urls = fetch_book_list(list_url)

    # os.cpu_count() may return None on some platforms; fall back to 1 so
    # the arithmetic never raises TypeError. Cap at 32 workers.
    max_workers = min(32, (os.cpu_count() or 1) + 4)
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_url = {executor.submit(fetch_douban_book_details, url): url for url in book_urls}
        for future in as_completed(future_to_url):
            url = future_to_url[future]
            try:
                book_details = future.result()
                save_book_details(book_details)
            except Exception as exc:
                # A worker raised something unexpected; log and continue
                # with the remaining futures.
                logging.error(f'{url} 生成异常: {exc}')

# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
