import pandas as pd
import parsel
import random
import time
from urllib.error import HTTPError
from requests.exceptions import RequestException
import requests


# Catalogue base URL; the page number plus a trailing slash get appended per request.
base_url = 'https://www.qimao.com/shuku/a-a-a-a-a-a-1-click-'

# Pool of User-Agent strings; one is picked at random for every request.
user_agents = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (iPhone; CPU iPhone OS 14_6 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.1 Mobile/15E148 Safari/604.1",
]

# Request-header factory
def get_headers():
    """Build HTTP request headers with a randomly rotated User-Agent."""
    headers = {
        'User-Agent': random.choice(user_agents),
        'Referer': 'https://www.qimao.com',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
    }
    return headers

# Accumulates one dict per scraped book; written to Excel at the end of the run.
extracted_data = []

# Rate limiting: at most 30 requests per minute, with each pause jittered
# to 80%-120% of the nominal interval so requests look less mechanical.
requests_per_minute = 30
_nominal_interval = 60 / requests_per_minute
interval_range = (_nominal_interval * 0.8, _nominal_interval * 1.2)

# Crawl listing pages 1..664 inclusive.
for page in range(1, 665):
    url = base_url + str(page) + '/'  # page URL
    print(f"正在抓取: {url}")

    try:
        headers = get_headers()
        # Timeout keeps one stalled connection from hanging the whole run.
        response = requests.get(url, headers=headers, timeout=15)

        # BUGFIX: the original did `raise HTTPError(f"...")` with urllib's
        # HTTPError, whose constructor requires (url, code, msg, hdrs, fp) —
        # the raise itself crashed with TypeError, and `e.code` never held
        # the real HTTP status.  Handle the status code directly instead.
        if response.status_code in (429, 403):
            # Rate-limited or blocked: back off for a minute, skip this page.
            print("请求过于频繁，暂停 60 秒后重试...")
            time.sleep(60)
            continue
        if response.status_code != 200:
            print(f"请求失败，错误代码: {response.status_code}")
            continue

        html = response.text

        # Parse the page; each book entry is rendered inside an <li>.
        selector = parsel.Selector(text=html)
        books = selector.css('li')

        for book in books:
            # Title; <li> elements without one are layout items — skip them.
            title = book.css('span.s-tit a::text').get()
            if not title:
                continue

            # Category, serialization status and word count share the s-tags span.
            category = book.css('span.s-tags em::text').get()
            tags_raw = book.css('span.s-tags::text').getall()
            tags_cleaned = "".join(tags_raw).replace("&nbsp;", "").split()

            status = next((tag for tag in tags_cleaned if tag in ("连载中", "完结")), None)
            word_count = next((tag for tag in tags_cleaned if "万字" in tag), None)

            # Short description.
            description = book.css('span.s-des::text').get()

            # Author and last-update time share one span, separated by &nbsp;.
            author_update = book.css('span.s-name::text').get()
            if author_update:
                author_update = author_update.replace("&nbsp;", " ")
                parts = author_update.split()
                author = parts[0]  # first token is the author name
                update_time = parts[-1].replace("更新", "").strip()  # strip the "更新" suffix
            else:
                author, update_time = None, None

            # Cover image URL.
            img_url = book.css('div.pic img::attr(src)').get()

            # Synthetic stock quantity (random 1-15; not scraped from the page).
            stock_quantity = random.randint(1, 15)

            # Store the record (image link kept next to the title fields).
            book_info = {
                '书名': title,
                '作者': author,
                '更新时间': update_time,
                '库存数量': stock_quantity,
                '简介': description,
                '图片链接': img_url,
                '类别': category,
                '字数': word_count,
            }
            extracted_data.append(book_info)

            # Debug trace per book.
            print(f"书名: {title}, 状态: {status}, 字数: {word_count}, 作者: {author}, 时间: {update_time}, 库存数量: {stock_quantity}")

        print(f"第{page}页爬取成功！！！")
        print("………………………………………………………………………………………………………………")

        # Jittered pause (80%-120% of the nominal interval) between requests.
        interval = random.uniform(*interval_range)
        time.sleep(interval)

    except RequestException as e:
        # Connection errors, timeouts, etc. — log and move on to the next page.
        print(f"请求异常: {e}")

# Persist whatever was scraped to an Excel workbook (skip when empty).
if not extracted_data:
    print("没有提取到任何数据，请检查选择器或网页内容。")
else:
    excel_path = 'books_info_nan.xlsx'
    frame = pd.DataFrame(extracted_data)
    frame.to_excel(excel_path, index=False)  # no index column in the output file
    print(f"数据已保存到 {excel_path}")
