import requests
from bs4 import BeautifulSoup
import pandas as pd
from urllib.parse import urljoin
import requests
from lxml import html
import pandas as pd
import time
import random
import os
from bs4 import BeautifulSoup
from collections.abc import Mapping  # 已使用正确的导入方式
import argparse  # 新增导入

# Suppress the InsecureRequestWarning that urllib3 emits because every
# request below is made with verify=False (certificate chain not validated).
requests.packages.urllib3.disable_warnings()

# Shared request headers: spoof a desktop Chrome User-Agent and prefer
# Simplified Chinese content, so the site serves normal browser pages.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Accept-Language": "zh-CN,zh;q=0.9",
}
# Base URL used to resolve relative book-detail and catalog links.
BASE_URL = "https://www.tup.tsinghua.edu.cn/booksCenter/"


def get_all_pages(start_url):
    """Return the list of all paginated result URLs for a search page.

    Reads the total page count from the element with id "pagecount" and
    builds one URL per page by appending "&page=N" to start_url.

    Always returns a non-empty list: on any failure (non-200 response,
    missing pagination element, parse error) it falls back to
    [start_url] so callers can safely call len() and iterate.
    """
    try:
        response = requests.get(start_url, headers=headers, verify=False)
        if response.status_code != 200:
            print(f"请求失败: {start_url}, 状态码: {response.status_code}")
            # BUG FIX: was `return None`, which crashed callers that do
            # len(...) / iteration on the result.
            return [start_url]

        # Parse the page with lxml; an IndexError here (no "pagecount"
        # element) is handled by the except clause below.
        tree = html.fromstring(response.content)
        pagination = tree.xpath('//*[@id="pagecount"]/text()')[0]

        if pagination:
            return [f'{start_url}&page={i}' for i in range(1, int(pagination) + 1)]
        # BUG FIX: original had `returns = [start_url]` (a typo creating a
        # dead local variable), so this branch silently returned None.
        return [start_url]
    except Exception as e:
        print(f"获取分页失败: {e}")
        return [start_url]


def extract_catalog_text(catalog_url):
    """Fetch a table-of-contents page and return its visible text.

    Returns an empty string when the request fails, the response is not
    HTTP 200, or parsing raises.
    """
    try:
        response = requests.get(catalog_url, headers=headers, verify=False)
        time.sleep(random.uniform(1, 3))  # polite random delay between requests
        if response.status_code == 200:
            soup = BeautifulSoup(response.content, "html.parser")
            # Prefer the dedicated content container; fall back to <body>.
            container = soup.find("div", class_="books_content")
            if container is None:
                container = soup.body
            return container.get_text(separator="\n", strip=True)
    except Exception as e:
        print(f"提取目录失败: {catalog_url}, 错误: {e}")
    return ""


def clean_text(text_list):
    """Join a list of text fragments into one whitespace-normalized string.

    Falsy or whitespace-only fragments are dropped; all remaining runs of
    whitespace (spaces, tabs, newlines) collapse to single spaces.
    Returns "" for an empty or falsy input.
    """
    if not text_list:
        return ""
    fragments = []
    for item in text_list:
        if not item:
            continue
        stripped = str(item).strip()
        if stripped:
            fragments.append(stripped)
    joined = " ".join(fragments)
    # str.split() with no arguments splits on any whitespace run, so the
    # re-join normalizes internal newlines and repeated spaces.
    return " ".join(joined.split())


def extract_book_info(book_element):
    """Extract details for one book from its list-page <li> element.

    Follows the detail-page link, parses name / author / ISBN / price /
    publication date from the detail page and, when a catalog link exists,
    fetches the table-of-contents text.

    Returns a dict of fields, or None when the book has no courseware
    ("暂无课件"), the request fails, or parsing raises.
    """
    # Defined up front so the except handler can always reference it.
    url = ""
    try:
        # BUG FIX: `author` was never initialized, so a detail page lacking
        # an "作者：" line raised NameError when building the result dict
        # (swallowed by the except, silently dropping the book).
        author = ""
        isbn = ""
        price = ""
        pub_date = ""

        # Resolve the book-detail link relative to the books-center base.
        link = book_element.find("a")
        url = urljoin(BASE_URL, link["href"]) if link else ""
        print(f"提取图书链接: {url}")

        response = requests.get(url, headers=headers, verify=False)
        time.sleep(random.uniform(1, 3))  # random delay to avoid IP bans

        if response.status_code != 200:
            print(f"请求失败: {url}, 状态码: {response.status_code}")
            return None

        tree = html.fromstring(response.content)

        # Skip books whose courseware link reads "暂无课件" (no courseware).
        kj = tree.xpath(
            "/html/body/div[2]/div[1]/div[2]/div[2]/div[3]/ul/li[1]/a[2]/text()"
        )
        if "暂无课件" in str(kj):
            print(f"跳过无课件信息的图书: {url}")
            return None

        name = tree.xpath("/html/body/div[2]/div[1]/div[2]/h4/text()")
        # The metadata paragraph separates its fields with CRLF line breaks.
        p_element = tree.xpath("/html/body/div[2]/div[1]/div[2]/p[2]")[0].text_content()
        for line in str(p_element).split("\r\n"):
            line = line.strip()
            if "ISBN：" in line:
                isbn = line.replace("ISBN：", "").strip()
            elif "出版日期：" in line:
                pub_date = line.replace("出版日期：", "").strip()
            elif "定价：" in line:
                price = line.replace("定价：", "").strip().replace("元", "")
            elif "作者：" in line:
                author = line.replace("作者：", "").strip()

        name = clean_text(name)

        # Table-of-contents link (may be relative to the books center).
        catalog_link = tree.xpath("/html/body/div[2]/div[2]/div[2]/div[2]/a/@href")
        catalog_url = catalog_link[0] if catalog_link else ""

        catalog_text = ""
        if catalog_url:
            if not catalog_url.startswith("http"):
                # Relative link — prefix the shared BASE_URL constant
                # (the original duplicated the literal here).
                catalog_url = BASE_URL + catalog_url
            catalog_text = extract_catalog_text(catalog_url)

        return {
            "名称": name,
            "作者": author,
            "ISBN": isbn,
            "价格": price,
            "出版时间": pub_date,
            "目录链接": catalog_url,
            "目录内容": catalog_text,
        }

    except Exception as e:
        print(f"处理页面时出错: {url}, 错误: {e}")
        return None


def scrape_books_from_page(page_url):
    """Scrape every book entry from one search-results page.

    Returns a list of book-info dicts (possibly empty). Request or parse
    failures are logged and yield an empty list, never an exception.
    """
    try:
        # CONSISTENCY FIX: this was the only request in the file that did
        # not send the shared headers, so it went out without the browser
        # User-Agent the rest of the crawler relies on.
        response = requests.get(page_url, headers=headers, verify=False)
        soup = BeautifulSoup(response.text, "html.parser")

        # The results list lives in <ul id="csproduct">.
        book_list = soup.find("ul", {"id": "csproduct"})
        if not book_list:
            return []

        books = []
        for book_element in book_list.find_all("li"):
            book_info = extract_book_info(book_element)
            if book_info:
                books.append(book_info)
        return books
    except Exception as e:
        print(f"从页面提取图书失败: {page_url}, 错误: {e}")
        return []


def main():
    """CLI entry point: crawl Tsinghua UP search results for a keyword
    and save the collected book info to <keyword>.xlsx."""
    # Parse the search keyword from the command line.
    parser = argparse.ArgumentParser(description='清华出版社教材检索')
    parser.add_argument('keyword', type=str, help='检索关键词')
    args = parser.parse_args()

    # Search-results URL for the given keyword.
    start_url = f"https://www.tup.tsinghua.edu.cn/booksCenter/booklist.html?keyword={args.keyword}&keytm=8D3832239188956D8D"

    print(f"开始爬取清华大学出版社图书(关键词: {args.keyword})...")

    # 1. Collect every results page.
    print("获取所有分页链接...")
    # ROBUSTNESS FIX: get_all_pages can return None on a failed request,
    # which made len(all_page_urls) below raise TypeError. Fall back to
    # the start URL so the crawl degrades to a single page instead.
    all_page_urls = get_all_pages(start_url) or [start_url]
    print(f"找到 {len(all_page_urls)} 个分页")

    # 2. Scrape each page and accumulate book records.
    all_books = []
    for i, page_url in enumerate(all_page_urls, 1):
        print(f"正在处理第 {i}/{len(all_page_urls)} 页: {page_url}")
        books = scrape_books_from_page(page_url)
        all_books.extend(books)
        print(f"已找到 {len(books)} 本图书，总计 {len(all_books)} 本")

    # 3. Save results to Excel (openpyxl engine for .xlsx output).
    if all_books:
        df = pd.DataFrame(all_books)
        df.to_excel(f"{args.keyword}.xlsx", index=False, engine="openpyxl")
        print(f"成功保存 {len(all_books)} 本图书信息到 {args.keyword}.xlsx")
    else:
        print("未找到任何图书信息")


# Run the crawler only when executed as a script (not on import).
if __name__ == "__main__":
    main()
