# -*- coding: utf-8 -*-
import random
from scrapy_redis.spiders import RedisSpider
import scrapy
import os


class ImagesSpider(RedisSpider):
    """Distributed (scrapy-redis) spider for sc.chinaz.com image galleries.

    Crawl flow:
        start URL (pushed to the ``huiger`` redis key)
          -> parse:           extract category names/URLs
          -> get_classify:    per category, extract thumbnail pages + paginate
          -> get_image:       per thumbnail page, extract the full-size image URL
          -> downloads_image: save the image bytes under BASE_DIR/<category>/
    """
    name = 'images'
    redis_key = 'huiger'
    # Root directory for downloads, one sub-folder per category.
    BASE_DIR = r'D:/code/站长素材图片下载/'

    def __init__(self, *args, **kwargs):
        # Allowed domains may be supplied at crawl time: -a domains=a.com,b.com
        domain = kwargs.pop("domains", "")
        # BUG FIX: the original assigned to the misspelled attribute
        # ``alllowed_domains``, so Scrapy never saw the setting.  Also
        # materialize the lazy ``filter`` iterator into a list so it can be
        # iterated more than once.
        self.allowed_domains = list(filter(None, domain.split(',')))
        super(ImagesSpider, self).__init__(*args, **kwargs)

    def parse(self, response):
        """Extract each category's name and URL from the start page and
        schedule one request per category.

        :param response: the start page response
        :return: yields one ``scrapy.Request`` per category
        """
        classify_names = response.xpath('//*[@class="flh"]/text()').extract()
        classify_urls = response.xpath('//*[@class="flh"]/@href').extract()

        # zip pairs names with their hrefs; also tolerates unequal lengths
        # (the original indexed both lists and could raise IndexError).
        for classify_name, classify_url in zip(classify_names, classify_urls):
            classify_dict = {
                'classify_name': classify_name,
                'classify_url': 'http://sc.chinaz.com' + classify_url,
            }
            yield scrapy.Request(
                url=classify_dict['classify_url'],
                callback=self.get_classify,
                meta={'huiger': classify_dict},
                dont_filter=True,
            )

    def get_classify(self, response):
        """For one category page, collect every thumbnail's detail-page URL.

        The listed images are thumbnails; the full-size image URL lives on
        the linked detail page, so each href is requested and handled by
        :meth:`get_image`.  Also follows the "next page" link to paginate.

        :param response: a category listing page
        :return: yields detail-page requests plus an optional next-page request
        """
        base = response.meta['huiger']
        image_urls = response.xpath('//*[@id="container"]/div/div/a/@href').extract()
        image_names = response.xpath('//*[@id="container"]/div/div/a/@alt').extract()
        for image_url, image_name in zip(image_urls, image_names):
            # BUG FIX: the original mutated ONE shared dict each iteration and
            # passed the same reference in ``meta`` to every request, so all
            # in-flight requests ended up seeing the LAST image's name/url.
            # Build a fresh per-request copy instead.
            data = dict(base, image_name=image_name, image_url=image_url)
            yield scrapy.Request(
                url=image_url,
                callback=self.get_image,
                meta={'huiger': data},
                dont_filter=True,
            )
        # Pagination: follow the "next page" link within the same category.
        next_page = response.xpath('//*[@class="nextpage"]/@href').extract_first()
        if next_page is not None:
            yield scrapy.Request(
                url='http://sc.chinaz.com/tupian/' + next_page,
                callback=self.get_classify,
                meta={'huiger': base},
                dont_filter=True,
            )

    def get_image(self, response):
        """From a thumbnail's detail page, request the full-size image.

        ``meta['huiger']`` carries a dict like::

            {'classify_name': '人物图片',
             'classify_url': 'http://sc.chinaz.com/tupian/renwutupian.html',
             'image_name': '乌克兰美女', 'image_url': 'http://...'}

        :param response: an image detail page
        :return: yields the download request for the full-size image
        """
        data = response.meta['huiger']
        image = response.xpath('//*[@class="imga"]/a/img/@src').extract_first()
        # BUG FIX: extract_first() returns None when the node is missing;
        # scrapy.Request(url=None) would raise.  Skip such pages instead.
        if not image:
            return
        data['image_url'] = image
        yield scrapy.Request(
            url=image,
            callback=self.downloads_image,
            meta={'huiger': data},
            dont_filter=True,
        )

    def downloads_image(self, response):
        """Save the image bytes to BASE_DIR/<category>/<image_name>.jpg.

        Replaces the original's four copy-pasted branches with one path:
        ``os.makedirs(..., exist_ok=True)`` both creates missing parents and
        is race-safe (the original ``exists``+``mkdir`` pair could fail if
        another worker created the directory in between).

        :param response: the binary image response
        """
        data = response.meta['huiger']
        dir_path = os.path.join(self.BASE_DIR, data['classify_name'])
        os.makedirs(dir_path, exist_ok=True)

        image_name = data['image_name'] + '.jpg'
        file_path = os.path.join(dir_path, image_name)
        if os.path.exists(file_path):
            # Name collision: disambiguate with a random suffix (original scheme).
            image_name = data['image_name'] + str(random.randint(0, 100)) + '.jpg'
            file_path = os.path.join(dir_path, image_name)

        with open(file_path, 'wb') as f:
            f.write(response.body)
        print('{}下载完成'.format(image_name))