import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class QidianspiderSpider(CrawlSpider):
    """Crawl spider for qidian.com that authenticates by replaying a raw
    browser ``Cookie`` header, then dumps the fetched page to ``./test.html``
    for manual inspection.
    """

    name = 'qidianSpider'
    allowed_domains = ['qidian.com']
    start_urls = ['https://my.qidian.com/']

    rules = (
        Rule(LinkExtractor(allow=r'Items/'), callback='parse_item', follow=True),
    )

    def start_requests(self):
        """Yield the initial request with cookies parsed from a raw header string.

        The cookie string is the value of the browser's ``Cookie`` request
        header (``name1=value1; name2=value2; ...``), redacted here.
        """
        cookie_header = '隐去cookie'
        # Split each pair on the FIRST '=' only: cookie values frequently
        # contain '=' themselves (e.g. base64 padding), so a plain
        # split("=") would truncate them; partition() also tolerates a
        # malformed pair with no '=' (value becomes '') instead of raising
        # IndexError.
        cookies = {}
        for pair in cookie_header.split("; "):
            key, _, value = pair.partition("=")
            cookies[key] = value
        # NOTE(review): routing the callback straight to parse_item bypasses
        # CrawlSpider's default parse(), so the `rules` above never fire for
        # this response — confirm whether link-following is actually wanted.
        yield scrapy.Request(
            self.start_urls[0],
            callback=self.parse_item,
            cookies=cookies,
        )

    def parse_item(self, response):
        """Write the response HTML to ./test.html (overwriting any previous dump)."""
        with open("./test.html", 'w', encoding='utf-8') as f:
            # response.text decodes using the encoding declared by the
            # response headers/meta, unlike body.decode() which assumes
            # UTF-8 and raises UnicodeDecodeError on other encodings.
            f.write(response.text)

