# -*- coding: utf-8 -*-
import scrapy
from bizhi.items import MyImageItem

class ImageSpider(scrapy.Spider):
    """Crawl ZOL desktop-wallpaper listing pages and yield image items.

    Flow: listing page -> image-group page -> per-image detail page ->
    ``MyImageItem`` carrying ``image_urls`` for the images pipeline.
    """

    name = 'image'
    # desk-fd.zol-img.com.cn is the CDN host the big images are served from,
    # so it must be allowed for the download requests to pass the filter.
    allowed_domains = ['desk.zol.com.cn', 'desk-fd.zol-img.com.cn']
    start_urls = ['http://desk.zol.com.cn/dongman/xiaohuangren/']

    def start_requests(self):
        """Issue the initial requests (mirrors Scrapy's default behaviour)."""
        for url in self.start_urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Extract every image-group link on a listing page.

        :param response: listing-page response
        :return: generator of requests for each image-group page
        """
        image_groups = response.xpath('//li[@class="photo-list-padding"]/a[@class="pic"]/@href').extract()
        for group in image_groups:
            # urljoin resolves site-relative hrefs against response.url,
            # which is more robust than hard-coded string concatenation.
            yield scrapy.Request(url=response.urljoin(group), callback=self.get_every_image_parse)

        # NOTE(review): pagination was disabled in the original (commented
        # out). To enable it safely, use
        #   next_href = response.xpath('//a[@id="pageNext"]/@href').extract_first()
        # and only follow response.urljoin(next_href) when next_href is not
        # None — the old `extract()[0]` form raised IndexError on the last page.

    def get_every_image_parse(self, response):
        """Extract the detail-page link of every image in a group.

        :param response: image-group page response
        :return: generator of requests for each image detail page
        """
        image_detail_urls = response.xpath('//ul[@id="showImg"]/li/a/@href').extract()
        for url in image_detail_urls:
            yield scrapy.Request(url=response.urljoin(url), callback=self.down_load_image_parse)

    def down_load_image_parse(self, response):
        """Extract the full-size image URL and yield an item for download.

        :param response: image detail-page response
        :return: generator yielding at most one ``MyImageItem``
        """
        images = response.xpath('//img[@id="bigImg"]/@src').extract()
        # Only yield when the big-image element is present on the page.
        if images:
            item = MyImageItem()
            item['image_urls'] = images
            yield item


