#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import scrapy
from bs4 import BeautifulSoup
from scrapy.http import Request
from scrapy.loader import ItemLoader, Identity
from downloadPictures.MeiziItem import MeiziItem


class MeiziSpider(scrapy.Spider):
    """Spider that crawls meizitu.com listing pages and follows each post.

    Starts from the first listing page, yields one request per picture
    post, and follows the "next page" pagination link until exhausted.
    """

    name = "Meizi"
    allowed_domains = ["meizitu.com"]
    # Initial listing page the crawl starts from.
    start_urls = [
        "http://www.meizitu.com/a/list_1_1.html",
    ]

    def parse(self, response):
        """Parse a listing page.

        Yields a ``Request`` for every post preview on the page, then a
        ``Request`` for the next listing page when a pagination bar with a
        "下一页" (next page) link is present.
        """
        soup = BeautifulSoup(response.body, "html5lib")
        # Each post preview is a <div class="pic"> wrapping a single link.
        for pic_div in soup.find_all("div", class_="pic"):
            yield Request(pic_div.a.get("href"), callback=self.parse_Item)
        # Pagination bar; follow the "next page" entry if present.
        page_div = soup.find("div", id="wp_page_numbers")
        if page_div is not None:
            for page_li in page_div.ul.find_all("li"):
                link = page_li.a
                if link is not None and link.string == "下一页":
                    # The href is relative to the current listing directory;
                    # resolve it against the response URL instead of
                    # hard-coding the "http://www.meizitu.com/a/" prefix.
                    yield Request(response.urljoin(link.get("href")),
                                  callback=self.parse)
        else:
            # Use the spider's logger so the message respects Scrapy's
            # logging configuration instead of going straight to stdout.
            self.logger.info("未找到下一页的标签")

    def parse_Item(self, response):
        """Parse a single post page and load its fields into a MeiziItem."""
        # Lazy %-style args keep formatting out of the hot path.
        self.logger.info("开始采集《%s》的数据！", response.url)
        loader = ItemLoader(item=MeiziItem(), response=response)
        # Post title.
        loader.add_xpath('name', '//h2/a/text()')
        # Tag text lives in the post metadata block (note: the doubled
        # space in 'postmeta  clearfix' matches the site's actual markup).
        loader.add_xpath(
            'tags', "//div[@id='maincontent']/div[@class='postmeta  clearfix']"
            + "/div[@class='metaRight']/p")
        # Image URLs; Identity() preserves the raw list of @src values.
        loader.add_xpath('image_urls', "//div[@id='picture']/p/img/@src",
                         Identity())
        # Record the source page URL on the item for reference.
        loader.add_value('url', response.url)

        return loader.load_item()
