# -*- coding: utf-8 -*-
import scrapy
from scrapy.selector import Selector
from scrapy.http import Request
from xiaohua_spider.items import XiaohuaSpiderItem

class XiaohuaSpider(scrapy.Spider):
    """Crawl campus-photo galleries from xiaohuar.com.

    Flow: ``start_urls`` points at the gallery index page. ``parse``
    extracts each gallery's detail-page URL, rewrites it into the
    corresponding slideshow URL (the routes follow a fixed pattern, which
    saves one request + parse per gallery), and ``pic_parse`` yields one
    ``XiaohuaSpiderItem`` per image found on the slideshow page.
    """
    name = 'xiaohua'
    allowed_domains = ['xiaohuar.com']
    start_urls = ['http://www.xiaohuar.com/hua/']
    custom_settings = {
        'DEFAULT_REQUEST_HEADERS': {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
        }
    }

    # Gallery URLs already scheduled. Scrapy de-duplicates and retries
    # requests on its own; this set merely avoids re-yielding requests for
    # galleries we have already seen on the index page.
    url_set = set()

    def parse(self, response):
        """Parse the gallery index page; yield a Request per slideshow page.

        :param response: index-page response (``start_urls``).
        """
        for pic in response.xpath('//div[@class="img"]/a'):
            detail_url = pic.xpath('.//@href').extract_first()
            # extract_first() returns None when the <a> carries no href;
            # skip those as well as galleries already scheduled.
            if not detail_url or detail_url in self.url_set:
                continue
            self.url_set.add(detail_url)
            # Detail page:    http://www.xiaohuar.com/p-1-2015.html
            # Slideshow page: http://www.xiaohuar.com/s-1-2015.html
            # Rewrite only the FIRST '/p-' (count=1) so any other '/p'
            # substring elsewhere in the URL is never touched.
            pic_url = detail_url.replace('/p-', '/s-', 1)
            yield Request(url=pic_url, callback=self.pic_parse)

    def pic_parse(self, response):
        """Parse a slideshow page; yield one item per image.

        :param response: slideshow-page response produced by ``parse``.
        """
        title = response.xpath('//h1/text()').extract_first()
        for src in response.xpath('//div[@class="inner"]/a/img/@src').extract():
            # Due to the site's development history, image sources come in
            # two forms:
            #   site-relative:  /d/file/20181216/small...jpg
            #   absolute CDN:   https://wx.dxs6.cn/api/xiaohua/upload/...jpg
            # response.urljoin() resolves the relative form against the page
            # URL and leaves absolute URLs alone — unlike the previous
            # startswith('https') check, it also handles plain http:// URLs
            # without wrongly prefixing them a second time.
            img_url = response.urljoin(src)
            yield XiaohuaSpiderItem(
                title=title,
                img_name=src.split('/')[-1],
                imgUrl=img_url,
            )