import csv
import os
import time
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup

# Scraper configuration.
BASE_URL = 'https://www.springer.com/series/16916/books?page={}'  # paginated series listing
CSV_FILE = 'springer_books.csv'  # final output file
DEBUG_DIR = 'debug_files'  # raw HTML snapshots kept for troubleshooting / caching

# exist_ok=True avoids the check-then-create race of os.path.exists + makedirs
# and is a no-op when the directory already exists.
os.makedirs(DEBUG_DIR, exist_ok=True)

def get_book_links(page_number):
    """Fetch one listing page of the series and return absolute book-page URLs.

    The raw HTML is always saved to DEBUG_DIR for troubleshooting, even when
    the response is an error page. Returns an empty list on any network
    failure or non-200 status.
    """
    url = BASE_URL.format(page_number)
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
    try:
        # A timeout keeps the scraper from hanging forever on a stalled
        # connection; a failed request degrades to an empty page result
        # instead of aborting the whole crawl.
        response = requests.get(url, headers=headers, timeout=30)
    except requests.RequestException as e:
        print(f"请求失败: {url} 错误: {e}")
        return []

    with open(f"{DEBUG_DIR}/page_{page_number}.html", "w", encoding="utf-8") as f:
        f.write(response.text)
    print(f"Saved page {page_number} to debug_files")

    if response.status_code != 200:
        print(f"请求失败: {url} 状态码: {response.status_code}")
        return []

    soup = BeautifulSoup(response.text, 'html.parser')
    links = []
    for a_tag in soup.select('.c-card__title > a[data-track="click"]'):
        href = a_tag.get('href')
        if not href:
            continue
        # Normalize relative hrefs (e.g. '/book/...') to absolute URLs; the
        # original strict startswith() check silently dropped relative links.
        absolute = urljoin('https://www.springer.com/', href)
        if '/book/' in absolute:
            links.append(absolute)

    print(f"Page {page_number} found {len(links)} links: {links[:3]}...")
    return links

def parse_book_page(url):
    """Parse a single Springer book page into a dict of bibliographic fields.

    Uses a cached HTML copy from DEBUG_DIR when one exists; otherwise
    downloads the page and caches it. Fields not present on the page are
    filled with "N/A" so every returned dict has a stable schema.
    """
    filename = f"{DEBUG_DIR}/book_{url.split('/')[-1]}.html"
    # Serve from the local cache when the page was already downloaded.
    if os.path.exists(filename):
        with open(filename, 'r', encoding='utf-8') as f:
            html_content = f.read()
        # Bug fix: the message previously printed a literal "(unknown)"
        # instead of the actual cache path.
        print(f"Using cached file: {filename}")
    else:
        # Use the same UA header and a timeout, consistent with get_book_links;
        # the bare requests.get previously had neither.
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3'}
        response = requests.get(url, headers=headers, timeout=30)
        html_content = response.text
        with open(filename, "w", encoding="utf-8") as f:
            f.write(html_content)
        # Bug fix: also printed a literal "(unknown)" before.
        print(f"Saved book page to {filename}")

    soup = BeautifulSoup(html_content, 'html.parser')
    data = {}

    # The book title lives in an <h1 data-test="book-title"> element.
    title_tag = soup.find('h1', {'data-test': 'book-title'})
    data['Book Title'] = title_tag.text.strip() if title_tag else "N/A"

    # Each bibliographic list item carries a bold key span and a value span;
    # items missing either half are skipped.
    for item in soup.select('.c-bibliographic-information__list-item'):
        key_span = item.select_one('.u-text-bold')
        value_span = item.select_one('.c-bibliographic-information__value')
        if key_span and value_span:
            data[key_span.text.strip()] = value_span.text.strip()

    # Guarantee every expected column exists for the CSV writer downstream.
    required_fields = [
        "Book Title", "Authors", "Series Title", "DOI", "Publisher",
        "Hardcover ISBN", "Softcover ISBN", "eBook ISBN",
        "Series ISSN", "Series E-ISSN", "Edition Number", "Number of Pages"
    ]
    for field in required_fields:
        data.setdefault(field, "N/A")

    return data

def main():
    """Collect book links (cached in links.csv), parse each book, write CSV.

    Pipeline: harvest listing pages 1-6 (or reuse links.csv), fetch and parse
    each book page with a 1-second delay between requests, then dump all
    records to CSV_FILE.
    """
    links = []
    if os.path.exists('links.csv'):
        # Reuse previously harvested links. Skip blank rows defensively:
        # row[0] on an empty row would raise IndexError.
        with open('links.csv', 'r', encoding='utf-8') as f:
            links = [row[0] for row in csv.reader(f) if row]
    else:
        all_links = []
        for page in range(1, 7):
            print(f"Processing page {page}")
            all_links.extend(get_book_links(page))
        # Cache the harvested links so reruns skip the listing pages.
        with open('links.csv', 'w', newline='', encoding='utf-8') as f:
            csv.writer(f).writerows([[link] for link in all_links])
        links = all_links

    if not links:
        print("未找到任何书籍链接，请检查爬取逻辑或网站结构是否有变化")
        return

    all_books = []
    for idx, link in enumerate(links):
        try:
            print(f"Processing book {idx+1}/{len(links)}: {link}")
            all_books.append(parse_book_page(link))
            time.sleep(1)  # throttle: be polite to the server
        except Exception as e:
            # Best-effort crawl: log and continue so one bad page does not
            # abort the whole run.
            print(f"Error on {link}: {str(e)}")

    print(f"Total books parsed: {len(all_books)}")
    if all_books:
        # Use the union of keys across all books (order-preserving).
        # DictWriter raises ValueError for rows containing keys missing from
        # fieldnames, and later pages may expose metadata fields the first
        # book lacks.
        fieldnames = list(dict.fromkeys(k for book in all_books for k in book))
        with open(CSV_FILE, 'w', newline='', encoding='utf-8') as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(all_books)
        print(f"数据已保存到 {CSV_FILE}")
    else:
        print("未获取到任何书籍数据")

# Run the scraper only when executed as a script, not when imported.
if __name__ == '__main__':
    main()