# -*- coding: utf-8 -*-
import scrapy
from Bizhi.items import BizhiItem


class BizhiSpider(scrapy.Spider):
    """Crawl netbian.com wallpaper listing pages and yield one item per
    wallpaper carrying its final image URL and title for the pipeline."""

    name = 'bizhi'
    allowed_domains = ['netbian.com']
    start_urls = ['http://www.netbian.com']

    def parse(self, response):
        """Parse a listing page: follow every wallpaper detail link, then
        follow the "next page" pagination link if one exists."""
        # Links to all wallpapers shown on the current listing page.
        for link in response.xpath('//div[@class="list"]/ul/li/a'):
            href = link.xpath('./@href').extract_first()
            if href:
                # urljoin handles both relative and absolute hrefs,
                # unlike the previous start_urls[0] + href concatenation.
                yield scrapy.Request(response.urljoin(href),
                                     callback=self.getUrl)

        # Pagination: on this site the "next" anchor carries class "prev".
        next_page = response.xpath('//a[@class="prev"]')
        if len(next_page) == 1:
            # A lone anchor may be either "previous" or "next"; follow it
            # only when its text contains "下" (Chinese for "next").
            text = next_page.xpath('./text()').extract_first() or ''
            if '下' in text:
                href = next_page.xpath('./@href').extract_first()
                if href:
                    yield scrapy.Request(response.urljoin(href),
                                         callback=self.parse)
        elif len(next_page) > 1:
            # Two anchors present: [0] is "previous", [1] is "next".
            # (Guarded with elif: the original indexed next_page[1]
            # unconditionally and raised IndexError when no anchors exist.)
            href = next_page[1].xpath('./@href').extract_first()
            if href:
                yield scrapy.Request(response.urljoin(href),
                                     callback=self.parse)

    # Parse a wallpaper detail page.
    def getUrl(self, response):
        """Follow the link from a wallpaper detail page to its download page."""
        href = response.xpath('//div[@class="pic-down"]/a/@href').extract_first()
        if href:
            yield scrapy.Request(response.urljoin(href), callback=self.getP)

    # Extract the wallpaper's direct download link.
    def getP(self, response):
        """Extract the image URL and title from the download page and yield
        an item; the pipeline performs the actual download."""
        item = BizhiItem()
        item['imageName'] = response.xpath(
            '//table/tr/td/a/img/@title').extract_first()
        item['imageLink'] = response.xpath(
            '//table/tr/td/a/img/@src').extract_first()
        # Hand the download link to the pipeline.
        yield item
