# -*- coding: utf-8 -*-
import scrapy
from ..items import AoisolasItem


class AoisolaspiderSpider(scrapy.Spider):
    """Spider for umei.cc picture galleries.

    ``parse`` walks a listing page and requests every gallery it links to;
    ``content`` scrapes one gallery page (image name + image URLs) and
    follows the gallery's own "next page" link until it runs out.
    """

    name = "AoiSola"
    allowed_domains = ["umei.cc"]
    start_urls = ['http://www.umei.cc/weimeitupian/1.htm']

    def parse(self, response):
        """Yield one Request per gallery linked from a listing page.

        :param response: listing page, e.g. /weimeitupian/1.htm
        """
        for li in response.xpath('//div[contains(@class,"TypeList")]/ul/li'):
            gallery_url = li.xpath('.//a/@href').extract_first()
            # Skip list items with no href instead of requesting the
            # literal string "None" (the old str() cast did exactly that).
            if not gallery_url:
                continue
            # response.follow resolves relative hrefs against response.url.
            yield response.follow(gallery_url, callback=self.content)

    def content(self, response):
        """Scrape one gallery page and follow its "next page" link.

        Yields an AoisolasItem with:
          - name:   the image alt text, truncated at the first '(' —
                    the site appends "(n/m)" page counters there
          - ImgUrl: list of image src URLs found in the ImageBody div
        """
        img_tag = response.xpath('//div[contains(@class,"ImageBody")]/p/img')
        # Missing alt must become an empty name, not the string "None".
        alt_text = img_tag.xpath('.//@alt').extract_first() or ''
        item = AoisolasItem()
        item['name'] = alt_text.split('(')[0]
        item['ImgUrl'] = img_tag.xpath('.//@src').extract()
        yield item
        # The last <li> of the NewPages pager holds the "next page" anchor;
        # its href is '#' on the final page of a gallery.
        pager = response.xpath('//div[contains(@class,"NewPages")]/ul/li')
        if not pager:
            return  # page has no pager at all — nothing more to follow
        next_url = pager[-1].xpath('.//a/@href').extract_first()
        if next_url and next_url != '#':
            # follow() joins relative URLs itself; no manual concatenation.
            yield response.follow(next_url, callback=self.content)


"""
防盗链的核心是判断你请求的地址是不是来自本服务器，若是，则给你图片，不是则不给，
知道了这么一个原则我们就可以破解了，我们每下载一张图片都先伪造一个妹子服务器的请求，
然后在下载，那它肯定会给你返回图片，于是我们就能顺利拿到图片！那问题的重点就归结为如何伪造请求地址，
scrapy实现起来灰常简单，就是写一个Middleware
"""

