import os
import requests
from bs4 import BeautifulSoup
import json
import time
import random

# Category id of the WeRead listing we want to scrape.
cate = 1500000

# Target URL: one category page of WeRead (weread.qq.com).
url = f'https://weread.qq.com/web/category/{cate}'

# Browser-like request headers so the server treats us as a normal client.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36',
    'Referer': 'https://weread.qq.com/',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Accept-Encoding': 'gzip, deflate, br',
    'Connection': 'keep-alive',
    'Accept': 'application/json, text/plain, */*',
}

# Send the HTTP request. A timeout is essential: without one, requests
# will wait forever on a stalled server and the script never terminates.
response = requests.get(url, headers=headers, timeout=10)

# Abort on any non-200 response. Exit with a nonzero status so callers
# (shell scripts, CI) can detect the failure; bare exit() would return 0.
if response.status_code != 200:
    print(f"请求失败，状态码：{response.status_code}")
    raise SystemExit(1)

# Dispatch on the declared content type. headers.get() returns None when the
# Content-Type header is absent; default to '' so the `in` test below cannot
# raise TypeError on a None value.
content_type = response.headers.get('Content-Type', '')
if 'application/json' in content_type:
    try:
        data = response.json()
        print("JSON 响应数据：", data)
    # Catch ValueError, the superclass of json.JSONDecodeError: older
    # requests versions raise plain ValueError from .json(), which the
    # narrower except clause would miss.
    except ValueError as e:
        print("JSON 解析错误：", e)
        exit()
else:
    # Parse the HTML listing page.
    soup = BeautifulSoup(response.text, 'html.parser')

    # Extract one record per book card. Every field falls back to a
    # default so a partially-rendered card cannot crash the run.
    books = []
    book_elements = soup.select('.wr_bookList_item_container')

    for book_element in book_elements:
        title_element = book_element.select_one('.wr_bookList_item_title')
        author_element = book_element.select_one('.wr_bookList_item_author')
        image_element = book_element.select_one('.wr_bookCover_img')
        rating_element = book_element.select_one('.wr_bookList_item_reading_percent')

        book = {
            'title': title_element.text.strip() if title_element else '未知',
            'author': author_element.text.strip() if author_element else '未知',
            'rating': rating_element.text.strip() if rating_element else '未知',
            # .get() instead of ['src']: an <img> without a src attribute
            # would otherwise raise KeyError.
            'image_url': image_element.get('src', '') if image_element else ''
        }
        books.append(book)
        # NOTE: the original slept 1-3 s per book here. Parsing an
        # already-downloaded page is purely local work — no extra requests
        # are made per item — so the delay only slowed the script and was
        # removed. Re-add a delay between *page fetches* if pagination is
        # ever added.

    # Persist the records as pretty-printed UTF-8 JSON.
    output_dir = os.path.abspath('src/script/books')
    os.makedirs(output_dir, exist_ok=True)
    output_file = os.path.join(output_dir, 'books_' + str(cate) + '.json')
    with open(output_file, 'w', encoding='utf-8') as f:
        json.dump(books, f, ensure_ascii=False, indent=4)

    # Report the actual path written (the old message hard-coded
    # "books.json", which is not the file produced above).
    print(f"书籍信息已保存到 {output_file} 文件中")