import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from fake_useragent import UserAgent
from ..items import DangdangItem

class ShopSpider(CrawlSpider):
    """Crawl the Dangdang category cid4004344 listing pages and yield one
    DangdangItem per product (title, price, company, review count)."""

    name = "shop"
    allowed_domains = ["dangdang.com"]
    start_urls = ['http://category.dangdang.com/cid4004344.html']

    def __init__(self, *args, **kwargs):
        """Initialize the spider and the random User-Agent generator."""
        super().__init__(*args, **kwargs)
        # fake_useragent provides a fresh random desktop UA string per access
        # of `self.ua.random`, used to reduce the chance of being blocked.
        self.ua = UserAgent()

    async def start(self):
        """Seed the crawl with the first category page, sending a randomized
        User-Agent header (Scrapy 2.13+ async start entry point)."""
        yield scrapy.Request(
            url=self.start_urls[0],
            headers={'User-Agent': self.ua.random}
        )

    # Pagination link extractor: matches /pgN-cid4004344.html links.
    # NOTE: the trailing dot is now escaped (`\.`) — the original pattern
    # used a bare `.`, which matches ANY character, not a literal dot.
    # LinkExtractor de-duplicates repeated links automatically.
    link = LinkExtractor(allow=r'/pg\d+-cid4004344\.html')

    # Rule: every extracted pagination link is fetched and parsed by
    # `parse_item`; follow=True re-applies the extractor on each fetched
    # page so the whole pagination chain is eventually covered.
    rules = (
        Rule(link, callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Extract all product entries from one category listing page.

        Yields:
            DangdangItem with keys 'title', 'money', 'company', 'reviews'.
            A missing node yields the sentinel string 'NOT' for that field
            instead of aborting the item (the original bare `except:` left
            partially-filled items and hid real errors).
        """
        for product in response.xpath('//*[@id="component_47"]/li'):
            item = DangdangItem()
            # Selector.get(default=...) returns the default when the node is
            # absent, unlike `.extract()[0]` which raises IndexError.
            item['title'] = product.xpath('./p[2]/a/text()').get(default='NOT')
            price = product.xpath('./p[1]/span/text()').get()
            # Strip the leading currency sign from the price text.
            item['money'] = price.strip('¥') if price is not None else 'NOT'
            item['company'] = product.xpath('./p[5]/a/text()').get(default='NOT')
            item['reviews'] = product.xpath('./p[4]/a/text()').get(default='NOT')
            # Use the spider logger rather than print() so output respects
            # Scrapy's configured log level.
            self.logger.debug('Scraped item: %r', item)

            # Hand the item to the configured pipelines.
            yield item