import requests
from bs4 import BeautifulSoup

import pandas as pd
import time


def get_book_links():
    """Scrape the CMP education book catalogue and collect book detail URLs.

    Iterates over list pages 1..1384 of http://www.cmpedu.com, parses each
    page with BeautifulSoup, and extracts links whose href starts with
    '/books/book/'.

    Returns:
        set[str]: deduplicated absolute URLs of book detail pages.
    """
    base = 'http://www.cmpedu.com'
    books = set()

    # Reuse one TCP connection pool across all 1384 requests.
    with requests.Session() as session:
        for page in range(1, 1385):
            url = f'{base}/books/list.htm?p={page}'

            try:
                # timeout prevents a single stalled request from hanging the
                # whole run; raise_for_status surfaces HTTP errors explicitly.
                response = session.get(url, timeout=10)
                response.raise_for_status()
            except requests.RequestException as exc:
                # Skip a failed page instead of losing everything collected
                # so far.
                print(f"page {page} failed: {exc}")
                continue

            response.encoding = 'utf-8'  # set to match the site's page encoding
            soup = BeautifulSoup(response.text, 'html.parser')

            # Each book entry lives in a <div class="ts_solgcontlist">; the
            # first <a href> inside it points at the book detail page.
            for item in soup.find_all('div', class_='ts_solgcontlist'):
                book_link = item.find('a', href=True)
                if book_link and book_link['href'].startswith('/books/book/'):
                    print(f"page {page}", book_link['href'])
                    # href already begins with '/', so join onto the bare host
                    # (the original code produced a double slash here).
                    books.add(f'{base}{book_link["href"]}')

            # time.sleep(3)  # optional: delay to avoid anti-scraping measures

    return books

# Print all book links (debug snippet, kept for reference):
# print('book links: ==============>')
# for book in books:
#     print(book)

# Save to file


def save_to_file():
    """Collect all book links and write them to 'book_links.csv'.

    The links are sorted before writing: ``get_book_links`` returns a set,
    whose iteration order is arbitrary, so sorting makes the output file
    deterministic across runs.
    """
    books = sorted(get_book_links())

    # One column named 'book_link', no index column in the CSV.
    df = pd.DataFrame(books, columns=['book_link'])
    df.to_csv('book_links.csv', index=False)


# Entry point: scrape every catalogue page and write book_links.csv.
if __name__ == '__main__':
    save_to_file()
