# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import os

import scrapy

from ZolSpider.fileUtil import makedirs, findfile
from ZolSpider.strUtil import isNull, confir
from ZolSpider.items import ZolspiderItem
from ZolSpider.settings import IMG_PATH


class zolSpiders(scrapy.Spider):
    """Crawl desk.zol.com.cn wallpaper listings and yield image items.

    Pipeline of callbacks:
        parse             -- listing pages: one directory + one gallery request per group
        generateImgWebUrl -- gallery pages: one request per image's full-size page
        generateImgUrl    -- full-size page: yield a ZolspiderItem for the item pipeline
    """
    name = 'ZolSpider'
    # URL template for the "show picture at maximum resolution" page; %s is the image id.
    allowed_img_domains = 'http://desk.zol.com.cn/showpic/10000x10000_%s_14.html'
    allowed_domains = ['desk.zol.com.cn']
    start_urls = ['http://desk.zol.com.cn/pc/1.html']

    def parse(self, response):
        """Parse a wallpaper listing page.

        For each group on the page: create a local directory named after the
        group, then request the group's gallery page with the directory path
        carried in request meta. Finally follow the "next page" link.
        """
        groups = response.css('li.photo-list-padding')

        for data in groups:
            page = confir(isNull(response.css('span.active::text').extract_first()))
            group_title = confir(isNull(data.css('span em::text').extract_first()))
            group_num = isNull(data.css('span::text').extract_first())
            relative_group_url = isNull(data.css('a::attr(href)').extract_first())
            group_date = isNull(data.css('ins::text').extract_first())

            # Per-group target directory. os.path.join keeps the path portable;
            # the previous version hard-coded a Windows '\\' separator, which on
            # POSIX produced filenames containing literal backslashes.
            dirs = os.path.join(
                IMG_PATH,
                page + '_' + group_title + '_' + group_num + '_' + group_date,
            )

            # Fetch the group's gallery and download its images.
            if relative_group_url is not None:
                # Create the destination folder for this group.
                makedirs(dirs=dirs)

                relative_group_url = response.urljoin(relative_group_url)
                # meta carries the destination directory to the next callback.
                yield scrapy.Request(relative_group_url, meta={'dirs': dirs}, callback=self.generateImgWebUrl)

        next_page = response.css('a#pageNext::attr(href)').extract_first()
        if next_page:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.parse)

    def generateImgWebUrl(self, response):
        """Parse a gallery page and request each image's full-size page.

        Extracts the image id from each thumbnail href (assumed shape
        '..._<id>_...'; TODO confirm against the live site) and plugs it
        into the allowed_img_domains template.
        """
        imgList = response.css('ul#showImg li')

        for img in imgList:
            imgNum = isNull(img.css('a::attr(href)').extract_first())
            # Guard against hrefs without an underscore, which would raise
            # IndexError on split('_')[1].
            if imgNum is not None and '_' in imgNum:
                imgNum = imgNum.split('_')[1]
                imgRealUrl = self.allowed_img_domains % imgNum
                # Forward the directory from the listing page along with the image id.
                yield scrapy.Request(imgRealUrl, meta={'dirs': response.meta['dirs'], 'imgNum': imgNum}, callback=self.generateImgUrl)

    def generateImgUrl(self, response):
        """Parse a full-size image page and yield one item.

        The item carries the real image URL (as a one-element list, the shape
        Scrapy's images pipeline expects for its url field), the image id as
        title, and the destination directory from request meta.
        """
        item = ZolspiderItem()

        # Real address of the image on the file server.
        imgTag = response.xpath("//img/@src").extract_first()
        item['imgUrl'] = [imgTag]
        item['title'] = response.meta['imgNum']
        item['dirsName'] = response.meta['dirs']
        yield item

