import scrapy
from ScrapyStudio.utils import spider


class DemoSpider(spider.CustomSpider):
    """Spider that scrapes wallpaper listings from pic.netbian.com.

    Listing pages pair each thumbnail image with a detail-page link and a
    title; a second request per item fetches the image size from the
    detail page before the item is yielded.
    """

    name = 'demo'
    # allowed_domains = ['demo.cn']
    # start_urls = ['http://demo.cn/']
    # Redis keys that seed start URLs — presumably consumed by the
    # CustomSpider base class (not visible here); confirm against it.
    redis_keys = ['demo:start_url', 'demo-node1:start_url']

    def parse(self, response, **kwargs):
        """Parse a listing page and schedule one detail request per image.

        :param response: listing-page response from pic.netbian.com.
        :yields: ``scrapy.Request`` to the detail page, carrying the
            partially-built item in ``meta['item']``.
        """
        # NOTE: response.xpath() returns a SelectorList (the original
        # ``scrapy.Selector`` annotations were incorrect and are dropped).
        images = response.xpath('//div[@id="main"]//ul[@class="clearfix"]//img/@src')
        pages = response.xpath('//div[@id="main"]//ul[@class="clearfix"]//a[@target]//@href')
        titles = response.xpath(
            '//div[@id="main"]//ul[@class="clearfix"]//a[@target]/@title | //div[@id="main"]//ul[@class="clearfix"]//a[@target]/b/text()'
        )
        # zip() truncates to the shortest sequence, so a card missing one
        # of the three fields drops that item instead of misaligning the
        # rest. No need to materialize the zip with list() — iterate it.
        for image, page, title in zip(images.extract(), pages.extract(), titles.extract()):
            item = dict(name=title,
                        src=f"https://pic.netbian.com/{image}",
                        path=f"download_file/{title}.jpg")
            yield scrapy.Request(url=f"https://pic.netbian.com/{page}",
                                 callback=self.parse_detail,
                                 meta={'item': item})

    def parse_detail(self, response):
        """Enrich the listing item with the size shown on the detail page.

        :param response: detail-page response; ``meta['item']`` holds the
            item built in :meth:`parse`.
        :yields: the completed item dict.
        """
        item = response.meta['item']
        # extract_first() avoids the IndexError that ``.extract()[0]``
        # raised (losing the whole item) when the size node is missing.
        item['size'] = response.xpath(
            '//div[@class="infor"]/p[2]/span/text()'
        ).extract_first(default='')
        yield item
