# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request
from urllib import parse
from ..items import MyscrapyItem
from ..utils import common

class PlaySpider(scrapy.Spider):
    """Crawl list pages of www.mzitu.com, follow each post into its paginated
    detail pages, and yield one item per detail page carrying the image URLs
    to download."""

    name = 'play'
    allowed_domains = ['www.mzitu.com']
    start_urls = ['http://www.mzitu.com/']

    def parse(self, response):
        """Parse a list page.

        For every post entry (``li`` under ``#pins``) request its detail page,
        passing the post's cover-image URL along in ``meta``; then follow the
        "next" pagination link until there is no further list page.
        """
        # Each <li> under #pins holds one <a> pointing at a detail page.
        post_nodes = response.xpath('.//ul[@id="pins"]/li/a')
        for post_node in post_nodes:
            post_url = post_node.xpath('./@href').extract_first()
            if not post_url:
                # Anchor without an href would urljoin back to the list page
                # itself and be parsed with the wrong callback — skip it.
                continue
            post_url = parse.urljoin(response.url, post_url)
            # Cover image is lazy-loaded, so the URL lives in data-original.
            cover_image_url = post_node.xpath('./img/@data-original').extract_first()
            yield Request(url=post_url, callback=self.parse_detail,
                          meta={"cover_image_url": cover_image_url})

        # Keep requesting the next list page until there is none left.
        next_url = response.xpath('.//a[contains(@class,"next")]/@href').extract_first()
        if next_url:
            next_url = parse.urljoin(response.url, next_url)
            yield Request(url=next_url, callback=self.parse)

    def parse_detail(self, response):
        """Parse one page of a post's detail view and yield an item.

        Two cases:
        1. The first detail page (reached from the list page) carries the
           cover-image URL in ``meta`` — download both the cover and this
           page's content image.
        2. Subsequent pages (reached via the in-post "next" link) carry no
           cover in ``meta`` — download only this page's content image.
        """
        item = MyscrapyItem()
        # None on pages after the first — meta is only set by parse().
        cover_image_url = response.meta.get("cover_image_url")

        item["title"] = response.xpath(
            './/h2[@class="main-title"]/text()').extract_first()
        item["image_type"] = response.xpath(
            './/div[@class="main-meta"]//a[@rel="category tag"]/text()').extract_first()

        content_img = response.xpath(
            './/div[@class="main-image"]//a/img/@src').extract_first()
        next_page = response.xpath(
            './/span[contains(text(),"下一页")]/parent::a/@href').extract_first()

        # Filter out missing values so the image pipeline never receives a
        # None entry (extract_first() returns None when the node is absent).
        item["cover_image_url"] = [u for u in (cover_image_url, content_img) if u]

        # Iterate through the post's own pagination; each page yields its
        # own item above, so no meta needs to be forwarded here.
        if next_page:
            yield Request(url=parse.urljoin(response.url, next_page),
                          callback=self.parse_detail)
        yield item


