# -*- coding: utf-8 -*-
"""
1、怎么把图片修改成自己想要的图片名称
2、怎么加水印？

results没有数据可能是因为配置写错了。

"""
import scrapy
import logging
from ..items import IvskyspiderItem


class IvskySpider(scrapy.Spider):
    """Crawl www.ivsky.com: home page -> nav menu -> big category ->
    small category -> paginated image lists, yielding one
    ``IvskyspiderItem`` per thumbnail found.

    Category titles are threaded through ``request.meta`` under the keys
    ``nav_title`` / ``big_cate_title`` / ``small_cate_title``.
    """

    name = 'ivsky'
    allowed_domains = ['ivsky.com']
    start_urls = ['https://www.ivsky.com/']

    custom_settings = {
        'ROBOTSTXT_OBEY': False,
        'CONCURRENT_REQUESTS': 4,
        'DOWNLOAD_DELAY': .3,
        'COOKIES_ENABLED': False,
        'DEFAULT_REQUEST_HEADERS': {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'en',
        },
        'ITEM_PIPELINES': {
            # Pipeline that downloads the image files.
            'IvskySpider.pipelines.ImagePipeline': 299,
            # 'IvskySpider.pipelines.IvskyspiderPipeline': 300,
            # Pipeline that persists item metadata to SQLite.
            'IvskySpider.pipelines.SqlitePipeline': 300,
        },

        # Item field that holds the image URL list, and the local
        # directory the images pipeline downloads into.
        'IMAGES_URLS_FIELD': 'img_url',
        'IMAGES_STORE': 'images',

        # Thumbnail sizes generated alongside each downloaded image.
        'IMAGES_THUMBS': {
            'small': (64, 64),
            'big': (128, 128),
        },
        # Logging: file target, threshold and line format.
        # Levels from high to low: CRITICAL / ERROR / WARNING / INFO / DEBUG.
        # DEBUG captures everything; CRITICAL captures only critical records.
        'LOG_FILE': 'a.log',
        'LOG_LEVEL': 'INFO',
        'LOG_FORMAT': '%(asctime)s [%(name)s] %(levelname)s: %(message)s',
    }

    def parse(self, response):
        """Entry point for the home page.

        Emits a couple of test log records (used to verify the LOG_*
        settings above), then re-requests the same URL so parsing of the
        nav menu happens in :meth:`parse_nav`.
        """
        self.log("准备打印日志信息：", level=logging.DEBUG)
        # Deliberately trigger an exception to verify that ERROR-level
        # records end up in the configured log file.
        try:
            empty = []
            print(empty[0])
        except Exception:
            self.log('这是一个error，我来测试日志级别的存储方式', logging.ERROR)
        # dont_filter: the home page URL was already fetched once, so the
        # dupefilter would otherwise drop this second request.
        yield scrapy.Request(
            callback=self.parse_nav,
            url=response.url,
            dont_filter=True,
            meta={},
        )

    def parse_nav(self, response):
        """Parse the home page nav menu and schedule one request per entry.

        Skips the first <li> (the "home" link) and records the entry text
        as ``nav_title`` in the request meta.
        """
        navs = response.xpath("//ul[@id='menu']/li/a")[1:]
        for nav in navs:
            nav_url = 'https://www.ivsky.com' + nav.xpath("@href").extract_first()
            nav_title = nav.xpath("text()").extract_first()
            yield scrapy.Request(
                url=nav_url,
                callback=self.parse_big_cate,
                dont_filter=True,
                meta={
                    'nav_title': nav_title,
                },
            )

    def parse_big_cate(self, response):
        """Collect the big-category links of one nav section.

        The two nav sections use different menu class names; an
        equivalent single selector would be
        ``//ul[contains(@class, 'menu')]/li/a``.
        """
        if response.meta['nav_title'] == '图片大全':
            all_big_cates = response.xpath("//ul[@class='tpmenu']/li/a")[1:]
        else:
            all_big_cates = response.xpath("//ul[@class='bzmenu']/li/a")[1:]

        for per_big_cate in all_big_cates:
            big_cate_url = per_big_cate.xpath("@href").extract_first()
            big_cate_title = per_big_cate.xpath("text()").extract_first()
            # Build a fresh meta dict per request. Mutating the shared
            # response.meta dict inside the loop would hand every pending
            # request the SAME dict, so all callbacks would observe the
            # title assigned last.
            meta = dict(response.meta, big_cate_title=big_cate_title)
            yield scrapy.Request(
                url='https://www.ivsky.com' + big_cate_url,
                callback=self.parse_small_cate,
                dont_filter=True,
                meta=meta,
            )

    def parse_small_cate(self, response):
        """Collect the small-category links within one big category."""
        all_small_cates = response.xpath("//div[@class='sline']/div/a")

        for per_small_cate in all_small_cates:
            small_cate_url = 'https://www.ivsky.com' + per_small_cate.xpath("@href").extract_first()
            small_cate_title = per_small_cate.xpath("text()").extract_first()
            # Fresh copy per request — see parse_big_cate for why sharing
            # one mutated dict across requests is wrong.
            meta = dict(response.meta, small_cate_title=small_cate_title)
            yield scrapy.Request(
                url=small_cate_url,
                callback=self.parse_all_page,
                dont_filter=True,
                meta=meta,
            )

    def parse_all_page(self, response):
        """Yield one item per image on the page, then follow pagination."""
        meta = response.meta
        all_imgs = response.xpath("//div[@class='left']/ul[@class='pli']/li//img")
        for img in all_imgs:
            img_title = img.xpath("@alt").extract_first()
            # @src is protocol-relative (starts with '//'); strip that
            # prefix and swap the thumbnail path '/t/' for the larger
            # preview path '/pre/'.
            img_url = 'https://' + img.xpath("@src").extract_first().replace('/t/', '/pre/')[2:]
            print(meta['nav_title'], meta['big_cate_title'], meta['small_cate_title'], img_title, img_url)
            item = IvskyspiderItem()
            item['img_nav_cate'] = meta['nav_title']
            item['img_big_cate'] = meta['big_cate_title']
            item['img_small_cate'] = meta['small_cate_title']
            # The images pipeline expects a list of URLs in this field.
            item['img_url'] = [img_url]
            item['img_title'] = img_title
            # Filled in later by the image pipeline.
            item['img_path'] = ''
            yield item

        # Follow the "next page" link, reusing this callback; meta is
        # passed through unchanged (no mutation here, so no copy needed).
        next_page = response.xpath("//a[contains(text(),'下一页')]/@href").extract_first()
        if next_page:
            next_page = f'https://www.ivsky.com{next_page}'
            print(next_page)
            yield scrapy.Request(
                url=next_page,
                callback=self.parse_all_page,
                dont_filter=True,
                meta=meta,
            )
