from scrapy import Request
from scrapy.spider import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from scrapy_mzitu.items import ScrapyMzituItem


class Spider(CrawlSpider):
    """Crawl mzitu.com: follow gallery index pages, then request every
    numbered image page of each gallery and yield one item per image."""

    name = 'mzitu'
    start_urls = ['http://www.mzitu.com']
    # NOTE(review): class-level mutable list, shared by all instances and
    # never read/written in this file — kept only for interface compatibility.
    img_urls = []
    rules = (
        # Follow gallery pages (e.g. /12345) but not their paginated image
        # pages (/12345/2) — those are generated explicitly in parse_item.
        Rule(LinkExtractor(allow=(r'com/\d{1,6}',), deny=(r'com/\d{1,6}/\d',)),
             callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Parse a gallery index page.

        Reads the gallery title and the highest page number from the
        pagination bar, then yields one Request per image page, carrying
        the gallery name and URL through ``meta``.
        """
        name = response.xpath('//h2[@class="main-title"]/text()').extract_first(default='N/A')
        url = response.url
        page_max = response.xpath('//div[@class="pagenavi"]/a[last()-1]/span/text()').extract_first()
        # Guard: if the pagination node is missing or not numeric, the old
        # int('N/A') call raised ValueError and killed the callback.
        if page_max is None or not page_max.strip().isdigit():
            return
        for page in range(1, int(page_max) + 1):
            page_url = '{}/{}'.format(url, page)
            yield Request(url=page_url, callback=self.image_url, meta={'name': name, 'url': url})

    def image_url(self, response):
        """Extract the single image URL from an image page and yield an item.

        Uses ``extract_first`` so a page without a ``main-image`` node no
        longer raises IndexError (the old ``extract()[0]``); such pages
        yield an item with an empty ``image_urls`` list instead.
        """
        item = ScrapyMzituItem()
        item['name'] = response.meta['name']
        item['url'] = response.meta['url']
        img_url = response.xpath('//div[@class="main-image"]//img/@src').extract_first()
        item['image_urls'] = [img_url] if img_url else []
        yield item

