import scrapy, json, time, random, os
from Wallhaven.items import WallhavenItem, PicItem


class WallhavenSpider(scrapy.Spider):
    """Crawl wallhaven.cc's monthly toplist and yield wallpaper items.

    ``parse`` walks the paginated search listing and schedules one JSON
    API request per wallpaper; ``parseNext`` turns each API response into
    two items (one for the image pipeline, one for metadata storage).
    """
    name = 'wallhaven'
    # allowed_domains = ['wallhaven.cc']
    # Search filter: categories=001 / purity=010, top of the last month,
    # sorted by toplist rank descending; page number is appended.
    baseUrl = 'https://wallhaven.cc/search?categories=001&purity=010&topRange=1M&sorting=toplist&order=desc&page='
    # Current 1-based page number of the listing being crawled.
    offset = 1
    # Running counter used only in the progress print below.
    nums = 1
    start_urls = [baseUrl + str(offset)]

    def parse(self, response):
        """Extract wallpaper ids from one listing page, schedule the JSON
        API request for each, then follow the next listing page."""
        # The listing header shows the total page count as " / N".
        # Page 1 may not render that header, so fall back to assuming at
        # least 2 pages and pick up the real total from page 2.
        header_parts = response.xpath(
            '//header[@class="thumb-listing-page-header"]/h2/text()[2]').extract()
        if header_parts:
            # Strip the leading "/ " before the number.
            last_index = int(header_parts[0][2:])
        else:
            last_index = 2
        print(f"正在爬取第{self.offset}页")
        # Each preview link is https://wallhaven.cc/w/<id>; query the JSON
        # API endpoint for the real image URL and metadata.
        for link in response.xpath('//a[@class="preview"]/@href').extract():
            wallpaper_id = link[len('https://wallhaven.cc/w/'):]
            yield scrapy.Request('https://wallhaven.cc/api/v1/w/' + wallpaper_id,
                                 callback=self.parseNext)
        # Follow the next page until the last one is reached.
        # BUG FIX: this Request was commented out, so the spider bumped
        # ``offset`` but never actually paginated past the first page.
        if self.offset < last_index:
            self.offset += 1
            yield scrapy.Request(self.baseUrl + str(self.offset),
                                 callback=self.parse)

    def parseNext(self, response):
        """Parse one wallpaper's JSON API response and yield two items.

        ``itemSRC`` (itemID "PicItem") carries just id + image_urls for the
        images pipeline; ``item`` (itemID "item") carries the full metadata
        for JSON export.  The ``itemID`` field lets pipelines tell the two
        apart since both yields pass through every pipeline.
        """
        data = json.loads(response.text)['data']
        # ImagesPipeline expects image_urls to be a list.
        img_list = [data['path']]

        # Full metadata item, persisted to JSON by a pipeline.
        item = WallhavenItem()
        item['itemID'] = "item"
        item['id'] = data['id']
        item['image_urls'] = img_list
        item['resolution'] = data['resolution']
        item['purity'] = data['purity']
        item['category'] = data['category']

        # Download-only item for the image pipeline.
        # NOTE(review): this is tagged "PicItem" but is constructed as a
        # WallhavenItem even though PicItem is imported — confirm whether
        # it should be ``PicItem()`` (left unchanged to avoid breaking the
        # pipelines' isinstance/field expectations).
        itemSRC = WallhavenItem()
        itemSRC['itemID'] = "PicItem"
        itemSRC['id'] = data['id']
        itemSRC['image_urls'] = img_list

        print(f'正在下载第{self.nums}个，{item}')
        self.nums += 1
        yield itemSRC
        yield item
