# -*- coding:utf-8 -*-
from scrapy import Request
from scrapy.spiders import Spider, CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapyExercise.items import QidianHotItem
from scrapy.loader import ItemLoader
from scrapy.loader.processors import MapCompose
import logging

class HotSalesSpider(Spider):
    """Scrape novel name/author pairs from Qidian's hot-sales ranking page.

    Field values are extracted with an ItemLoader whose MapCompose input
    processors transform the raw strings before they are stored in the
    QidianHotItem (demonstrates Scrapy's processor chain).
    """
    name = 'hot'

    start_urls = ['https://www.qidian.com/rank/hotsales?style=1&page=1']

    def name_dosomething(self, i):
        # First processor in the "name" chain: log the raw extracted value,
        # then replace it with a fixed marker string (processor demo).
        logging.info(i)
        return '我被处理了'

    def name2_dosomething(self, i):
        # Second processor in the "name" chain: append the suffix "II"
        # to whatever the previous processor returned.
        return '%sII' % i

    def author_dosomething(self, i):
        # Input processor for the "author" field: log the raw value and
        # replace it with a fixed marker string (processor demo).
        logging.info(i)
        return '也被处理了'

    def parse(self, response, **kwargs):
        """Parse one ranking page and yield a loaded item per book.

        Each book sits in a ``div.book-mid-info`` node; every extracted
        value passes through the MapCompose processors declared above
        before being loaded into the item.
        """
        list_selector = response.xpath("//div[@class='book-mid-info']")
        for one_selector in list_selector:
            # ItemLoader scoped to this single book's selector sub-tree:
            # item= receives a QidianHotItem instance, selector= the node.
            loader = ItemLoader(item=QidianHotItem(), selector=one_selector)
            loader.add_xpath("name", "h4/a/text()",
                             MapCompose(self.name_dosomething, self.name2_dosomething))
            loader.add_css("author", ".author a::text",
                           MapCompose(self.author_dosomething))
            # Constant field: every entry on this ranking is a serialized
            # ("连载") novel.
            loader.add_value("form", "连载")
            yield loader.load_item()

class HotSalesCrawlSpider(CrawlSpider):
    """Crawl the hot-sales ranking by following pagination and book links.

    ``rules`` takes the place of a hand-written ``parse``: every response
    (the start URL and any request without its own callback) is scanned
    with the LinkExtractors below, and the extracted links are requested
    automatically by the CrawlSpider machinery.
    """
    name = 'hot_crawl'
    allowed_domains = ['www.qidian.com']
    start_urls = ['https://www.qidian.com/rank/hotsales?style=1&page=1']

    rules = (
        # No callback: "next page" links are only followed, so the whole
        # ranking gets paginated through.
        Rule(LinkExtractor(restrict_xpaths='//*[contains(@class,"next")]')),
        # Book detail links (itemprop="url") are handed to item_parse.
        Rule(LinkExtractor(restrict_xpaths='//*[@itemprop="url"]'),
             callback='item_parse'),
    )

    def item_parse(self, response, **kwargs):
        # Placeholder: detail-page extraction is not implemented yet.
        pass