# -*- coding: utf-8 -*-
import scrapy
from sasaone.items import SasaoneItem
import time
import os
import urllib.request


class SasaoneSpiderSpider(scrapy.Spider):
    """Crawl gallery listings on www.sasaone.com and yield one item per
    detail-page image so a pipeline can download it.
    """
    name = 'sasaone_spider'
    allowed_domains = ['www.sasaone.com']
    start_urls = ['http://www.sasaone.com/yunv/']

    # Local root folder for downloaded images; one sub-folder per gallery.
    file_path = "D:\\project\\scrapy\\sasaone\\image\\"
    domain_name = "http://www.sasaone.com"
    link_name = "http://www.sasaone.com/yunv/"

    def parse(self, response):
        """Parse the first-level listing page.

        For every gallery entry: create its local image folder, then
        schedule a request for the gallery's detail page, carrying the
        folder name and the "YYYYMM" date segment in ``meta``.
        """
        cover_picture_list = response.xpath("//div[@class='aleft']//ul[@class='alist']/li")
        for item in cover_picture_list:
            file_name = item.xpath(".//a//img/@alt").extract_first()
            time1 = item.xpath(".//div[@class='addtime']/text()").extract_first()
            # The post date uses a two-digit year ("YY-MM-DD"); re-format it
            # as "YYYYMM", the path segment used by the paginated detail
            # URLs. strftime accepts the struct_time from strptime directly,
            # so the old mktime/localtime round-trip was unnecessary.
            second_url = time.strftime('%Y%m', time.strptime(time1, "%y-%m-%d"))
            # Make sure the per-gallery folder exists before items are yielded.
            self.fileIsBeing(file_name)
            detail_url = self.domain_name + item.xpath(".//a/@href").extract_first()
            yield scrapy.Request(detail_url,
                                 meta={'file_name': file_name, 'second_url': second_url},
                                 callback=self.detail_parse, dont_filter=True)
        # Pagination of the listing page is deliberately disabled:
        # next_link = response.xpath("//div[@class='aleft']//div[@class='pages']//a[last()-1]/@href").extract()
        # if next_link:
        #     yield scrapy.Request(self.link_name + next_link[0], callback=self.parse)

    def detail_parse(self, response):
        """Parse a second-level (detail) page.

        Yields one SasaoneItem for the page's image, then follows the
        gallery's "next page" link, if any.
        """
        file_name = response.meta['file_name']
        second_url = response.meta['second_url']
        detail_pictures = self.domain_name + response.xpath(
            "//div[@class='pic_list']//a[last()]//img/@src").extract_first()
        img_name = detail_pictures.split('/')[-1]

        item = SasaoneItem()
        item['file_path'] = self.file_path + file_name
        item['img_name'] = img_name
        item['detail_pictures'] = detail_pictures
        yield item

        # '#' marks the last page of a gallery; also guard against a page
        # with no pager at all (extract_first() returns None), which would
        # otherwise crash the string concatenation below.
        next_link = response.xpath("//div[@class='pagelist']//a[last()]/@href").extract_first()
        if next_link and next_link != '#':
            yield scrapy.Request(self.link_name + second_url + '/' + next_link,
                                 meta={'file_name': file_name, 'second_url': second_url},
                                 callback=self.detail_parse, dont_filter=True)

    def detail_parse_bak(self, response):
        """Previous version of detail_parse, kept for reference: downloads
        the image synchronously with urllib instead of yielding an item.

        NOTE(review): its next-page callback points at detail_parse, not at
        this method - presumably a leftover from the rewrite; left as-is.
        """
        file_name = response.meta['file_name']
        second_url = response.meta['second_url']
        detail_pictures = self.domain_name + response.xpath(
            "//div[@class='pic_list']//a[last()]//img/@src").extract_first()
        img_name = detail_pictures.split('/')[-1]

        # Download only when the file is not already on disk.
        target = os.path.join(self.file_path + file_name, img_name)
        if not os.path.exists(target):
            urllib.request.urlretrieve(detail_pictures, target)

        # Same next-page guard as detail_parse: skip the terminal '#' link
        # and tolerate a missing pager (None).
        next_link = response.xpath("//div[@class='pagelist']//a[last()]/@href").extract_first()
        if next_link and next_link != '#':
            yield scrapy.Request(self.link_name + second_url + '/' + next_link,
                                 meta={'file_name': file_name, 'second_url': second_url},
                                 callback=self.detail_parse, dont_filter=True)

    def fileIsBeing(self, name):
        """Ensure the image folder for gallery *name* exists; return its path.

        ``os.makedirs(..., exist_ok=True)`` replaces the old
        check-then-``mkdir`` sequence: it avoids the existence-check race,
        creates missing parent directories, and drops the local variable
        that shadowed the builtin ``bool``.
        """
        path = self.file_path + name
        os.makedirs(path, exist_ok=True)
        return path