# -*- coding:utf-8 -*-
import datetime
import os
import random
import urllib
import urllib.request

import scrapy
from scrapy.http import Request

import tools

class IndexSpider(scrapy.spiders.Spider):
    """Crawl beeg-sex.com listing pages and download each item's thumbnail
    image into a per-day folder on the local disk, then follow every
    site-relative link and parse it the same way.
    """

    name = "image"  # spider name: run with `scrapy crawl image`
    # allowed_domains = ["vxinghe.com"]  # restrict the crawl to this domain
    start_urls = ["http://www.beeg-sex.com"]
    count = 10  # NOTE(review): appears unused in this file — confirm before removing

    def parse(self, response):
        """Download item images from *response* and recurse into links.

        For each ``div.item`` on the page, saves its image as
        ``<caption>.jpg`` (or a random 4-digit name when the caption is
        missing) unless a file with that name already exists.  Then yields
        a new Request for every href beginning with ``/``.

        :param response: scrapy Response for a listing page.
        :yields: scrapy Request objects for same-site links.
        """
        baseUrl = "http://www.beeg-sex.com"
        # One download folder per day, e.g. /Users/lax/Desktop/2024-01-31/
        filePath = "/Users/lax/Desktop/" + datetime.datetime.now().strftime('%Y-%m-%d') + "/"
        tools.mkdir(filePath)  # project helper; presumably mkdir -p semantics — TODO confirm

        for item in response.xpath('//div[@class="item"]'):
            src = item.xpath('.//div[@class="image"]/a/img/@src').extract_first()
            name = item.xpath('.//div[@class="info"]/a/em/text()').extract_first()
            if not name:
                # Caption missing or empty: fall back to a random file stem.
                name = str(random.randint(1000, 9999))
            if src:
                file_name = "%s.jpg" % name
                file_path = os.path.join(filePath, file_name)
                if not os.path.exists(file_path):
                    self.logger.debug("saving %s -> %s", src, file_path)
                    # BUG FIX: the original called the Python-2-only
                    # urllib.urlretrieve; in Python 3 it lives in urllib.request.
                    urllib.request.urlretrieve(src, file_path)

        # BUG FIX: the original queried `hxs` here — the loop variable from
        # above, which is unbound (NameError) on pages with no items and at
        # best pointed at the last item.  Query the response itself instead.
        for url in response.xpath('//a/@href').extract():
            if url.startswith('/'):  # follow only site-relative links
                yield Request(baseUrl + url, callback=self.parse)
