# -*- coding: utf-8 -*-
import json
import random
from urllib.parse import quote
import scrapy
from scrapy_redis.spiders import RedisSpider


class BaiduFanRenSpider(RedisSpider):
    """Distributed spider that fetches novel chapters from Baidu's
    ``getChapterContent`` PC API and yields ``{'title', 'content'}`` items.

    Start URLs are pulled from the Redis list named by ``redis_key``
    (scrapy-redis); ``parse`` then fans out one request per candidate
    chapter id in ``[chapter_start, chapter_end)``.
    """

    name = 'baiduFanRen'
    allowed_domains = ['dushu.baidu.com']
    redis_key = 'myspider:start_urls'  # Redis list key holding the start URLs

    # API endpoint; the JSON payload goes URL-encoded into the `data` query param.
    base_url = 'https://dushu.baidu.com/api/pc/getChapterContent?data='
    book_id = '4315647122'       # target book on dushu.baidu.com
    cid_prefix = '1022'          # chapter ids observed to start with this prefix
    chapter_start = 5722         # first chapter-id suffix to probe (inclusive)
    chapter_end = 6000           # last chapter-id suffix to probe (exclusive)

    def parse(self, response):
        """Generate one API request per candidate chapter id.

        The incoming ``response`` (the seed page from Redis) is not used;
        it only triggers the fan-out.
        """
        for i in range(self.chapter_start, self.chapter_end):
            # Build the full JSON payload (braces included) and percent-encode
            # it as a whole. The original code quoted only the inner key/value
            # text and concatenated raw '{'/'}' into the URL, which produces a
            # query string that is invalid per RFC 3986.
            payload = json.dumps({
                'book_id': self.book_id,
                'cid': f'{self.book_id}|{self.cid_prefix}{i}',
                'need_bookinfo': 1,
            })
            full_url = self.base_url + quote(payload)
            # dont_filter=True: the endpoint is the same for every chapter
            # except for the encoded payload, and we must not let the dupe
            # filter drop any of these probes.
            yield scrapy.Request(url=full_url, callback=self.parse_info,
                                 dont_filter=True)

    def parse_info(self, response):
        """Parse one chapter-content API response.

        Yields a ``{'title', 'content'}`` dict, or nothing when the API
        returns an error body. Since ``parse`` blindly probes a range of
        chapter ids, malformed or empty responses are expected and must be
        skipped instead of raising KeyError/TypeError.
        """
        try:
            body = json.loads(response.text)
        except json.JSONDecodeError:
            self.logger.warning('Non-JSON response from %s', response.url)
            return

        # `data` may be absent or null on error responses; `novel` likewise.
        novel = (body.get('data') or {}).get('novel')
        if not novel:
            self.logger.debug('No chapter data at %s', response.url)
            return

        yield {
            'title': novel.get('chapter_title'),
            'content': novel.get('content'),
        }