# -*- coding: utf-8 -*-
import scrapy
from meizitu.items import MeizituItem, MeizituPicturesUrlsItem
from scrapy import Selector, Request
import re


class MzituSpider(scrapy.Spider):
    """Spider for mzitu.com.

    Starts at the site-wide archive page, yields one ``MeizituItem`` per
    album (title + url), then follows each album and paginates through
    its picture pages, yielding a ``MeizituPicturesUrlsItem`` per picture.
    """

    name = 'mzitu'
    allowed_domains = ['mzitu.com']
    # Start from the archive page so no per-category analysis is needed.
    start_urls = ['http://www.mzitu.com/all']

    def parse(self, response):
        """Parse the archive page.

        Yields a ``MeizituItem`` (album title + url) for every album link
        found, plus a ``Request`` for each album's first picture page.
        """
        # All album links and their titles on the archive page.
        urls_list = response.xpath("//div[@class='all']//li//a/@href").extract()
        titles_list = response.xpath("//div[@class='all']//li//a/text()").extract()

        # BUG FIX: the original only assigned ``url_title_list`` when the
        # two lists were the same length, raising NameError otherwise.
        # ``zip`` pairs up to the shorter list, so the spider now degrades
        # gracefully; log a warning when the page structure looks off.
        if len(urls_list) != len(titles_list):
            self.logger.warning(
                "Mismatched counts on %s: %d urls vs %d titles",
                response.url, len(urls_list), len(titles_list))
        url_title_list = dict(zip(titles_list, urls_list))

        for title, url in url_title_list.items():
            # The album title doubles as the target directory name.
            yield MeizituItem(dir_info=title, url=url)
            yield Request(url=url, callback=self.parse_picture_page)

    def parse_picture_page(self, response):
        """Parse one picture page and follow pagination.

        Yields a ``MeizituPicturesUrlsItem`` carrying (headers, url, name)
        for the picture on this page, then a ``Request`` for the next page
        when one exists.
        """
        # Rebuild request headers per page: the image host checks the
        # Referer, which must track the page currently being scraped.
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,image/jpeg,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.9'}

        # Picture title and image url on the current page.
        pic_name = response.xpath("//div[@ class='main']//h2/text()").extract()
        pic_url = response.xpath("//div[@class='main-image']//a/img/@src").extract()

        # Strip the trailing page number so the Referer points at the
        # album page rather than this numbered sub-page.
        headers["Referer"] = re.sub(r"[^/]?(\d+)$", "", response.url)

        if pic_name and pic_url:
            # BUG FIX: copy the headers dict.  The original stored the
            # live ``headers`` reference in the item, so the Referer
            # mutation below (for the next-page request) silently changed
            # the headers of the already-yielded item.
            pic_info = (dict(headers), pic_url[0], pic_name[0])
            yield MeizituPicturesUrlsItem(pic_info=pic_info)

        # "下一页" = "next page".  Raw string so ``\S`` is not an invalid
        # escape in the (behaviorally identical) pattern.
        next_page_url = Selector(response).re(r'<a href="(\S*)"><span>下一页»</span></a>')
        if next_page_url:
            # Next-page requests use the current page as Referer.
            headers["Referer"] = response.url
            yield Request(url=next_page_url[0],
                          callback=self.parse_picture_page,
                          headers=headers)
