from datetime import datetime

import json
import scrapy

from books_rank_crawler.items import dedaoBookItem
from books_rank_crawler.settings import FEED_EXPORT_FIELDS_DEDAO


class DedaoSpider(scrapy.Spider):
    """Spider for the Dedao (dedao.cn) "hot" book ranking list.

    POSTs to Dedao's product-list API endpoint and yields one
    ``dedaoBookItem`` per book in each JSON response page.
    """

    name = "dedao"
    allowed_domains = ["www.dedao.cn"]
    custom_settings = {
        'FEED_EXPORT_FIELDS': FEED_EXPORT_FIELDS_DEDAO
    }

    max_page = 10  # hard cap on pages crawled (guards against an endless loop)
    current_page = 0  # current page number (0-based)

    def start_requests(self):
        """Issue the initial POST request for page 0 of the ranking list."""

        # API endpoint
        url = 'https://www.dedao.cn/pc/label/v2/algo/pc/product/list'

        # Request headers
        headers = {
            'Content-Type': 'application/json',
            'Referer': 'https://www.dedao.cn/'
        }

        # Request payload; label_id / navigation_id identify the ranking list.
        post_data = {"classfc_name": "热门",
                     "label_id": "X9vmWzAl54WYrJ78ayq1VjKbDeZRxzpvnpXEBOlvko9L026gdm3AnGNMDkG1x8JR", "nav_type": 0,
                     "navigation_id": "ranking:yJdeKqvNJKmXr9WZyGB2aEOLlvYz6p1jv62Qje3n8Ro1Db05kA7qdMV4xgroWEMN",
                     "page": self.current_page, "page_size": 100, "product_types": "2", "request_id": "",
                     "sort_strategy": "HOT",
                     "tags_ids": []}

        yield scrapy.Request(
            url=url,
            method='POST',
            headers=headers,
            body=json.dumps(post_data),  # serialize the dict as the JSON body
            callback=self.parse
        )

    def parse(self, response):
        """Parse one API response page and yield a ``dedaoBookItem`` per book.

        Paginates by re-POSTing the previous request body with an
        incremented ``page`` value, up to ``max_page`` pages.
        """
        data = json.loads(response.text)
        # Guard the outer level: a missing/None "c" key previously raised
        # AttributeError on error payloads.
        product_list = (data.get("c") or {}).get("product_list")

        if not product_list:
            return

        for product in product_list:
            item = dedaoBookItem()  # one item per book
            item["index_img"] = product.get("index_img")  # cover image
            item["name"] = product.get("name")  # title
            item["introduction"] = product.get("introduction")  # synopsis
            item["lecturer_name"] = product.get("lecturer_name")  # author
            item["log_id"] = product.get("log_id")  # book id
            item["log_type"] = product.get("log_type")  # product type
            # Price is divided by 100 (presumably cents -> yuan — TODO confirm);
            # a missing price previously raised TypeError on None / 100.
            price = product.get("price")
            item["price"] = price / 100 if price is not None else None
            item["publish_time"] = product.get("publish_time")  # publish date
            item["score"] = product.get("score")  # rating
            item["crawl_time"] = datetime.now().strftime("%Y%m%d")  # crawl date
            item["crawl_url"] = response.url  # crawled url
            # Use the spider logger instead of print() so output respects
            # Scrapy's LOG_LEVEL configuration.
            self.logger.debug("scraped item: %s", item)
            yield item

        # Decode the original request body once and reuse it below.
        request_body = json.loads(response.request.body.decode())
        # NOTE(review): start_requests always sends classfc_name="热门", so this
        # early return makes the pagination branch below unreachable — confirm
        # whether single-page crawling is intended.
        if request_body.get("classfc_name") == "热门":
            return  # skip pagination entirely

        # Pagination: re-send the same body with the next page number.
        if self.current_page < self.max_page - 1:  # enforce the page cap
            self.current_page += 1

            # Reuse the previous request body, bumping only the page number.
            next_post_data = dict(request_body)
            next_post_data["page"] = self.current_page

            yield scrapy.Request(
                url=response.url,
                method='POST',
                headers=response.request.headers,
                body=json.dumps(next_post_data),
                callback=self.parse
            )
