# -*- coding: utf-8 -*-
import scrapy
from ..items import IvskyspiderItem


"""
图像处理库：是PIL = Pillow
视频处理库：opencv
"""


class ImagsSpider(scrapy.Spider):
    """Spider that crawls doutula.com listing pages and yields image items.

    Flow: ``parse`` re-issues the start URL -> ``parse_all_page`` schedules
    one request per listing page -> ``parse_one_page`` extracts the image
    name/url pairs and yields ``IvskyspiderItem`` objects for the pipelines
    configured in ``custom_settings``.
    """

    name = 'imags'
    allowed_domains = ['doutula.com']
    start_urls = [
        'http://www.doutula.com/'
    ]

    # Per-spider overrides of the project-wide Scrapy settings.
    custom_settings = {
        'ROBOTSTXT_OBEY': False,
        'CONCURRENT_REQUESTS': 16,
        'DOWNLOADER_MIDDLEWARES': {
            'ivskyspider.downloadermiddleware.UserAgentMiddleware': 543,
            # Disable the built-in middleware so the custom one takes over.
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
        'ITEM_PIPELINES': {
            # Storage pipeline.
            'ivskyspider.pipelines.SqlitePipeline': 300,

            # Image download pipeline.
            # 'scrapy.pipelines.images.ImagesPipeline': 299,
            # 'ivskyspider.pipelines.DownloaderPipeline': 299,
            'ivskyspider.pipelines.ImagePipeline': 299,

            # File download pipeline (alternative, currently unused).
            # 'scrapy.pipelines.files.FilesPipeline': 299,
            # 'ivskyspider.pipelines.FilePipeline': 299,
        },
        'DOWNLOAD_DELAY': 0.3,
        'COOKIES_ENABLED': False,
        # Log file and level (one of: DEBUG / INFO / WARNING / ERROR / CRITICAL).
        # 'LOG_FILE': 'imgs.log',
        # 'LOG_LEVEL': 'INFO',

        # ImagesPipeline configuration: which item field holds the URL list,
        # and where downloaded images are stored on disk.
        'IMAGES_URLS_FIELD': 'img_url',
        'IMAGES_STORE': 'images',
        # Thumbnails (scaled proportionally).
        'IMAGES_THUMBS': {
            'small': (30, 30),
            'middle': (50, 50),
        },

        # FilesPipeline configuration (requires overriding its methods;
        # files have no thumbnail support).
        # 'FILES_URLS_FIELD': 'img_url',
        # 'FILES_STORE': 'imag_files',
    }

    def parse(self, response):
        """Re-issue the start URL and hand parsing over to parse_all_page."""
        yield scrapy.Request(
            url=response.url,
            meta={},
            dont_filter=True,
            callback=self.parse_all_page,
        )

    def parse_all_page(self, response):
        """Schedule one request per listing page.

        The real last-page number is visible in the pagination links, but
        the crawl is currently hard-coded to pages 1-10 and limited to a
        single page by the ``break`` below while testing.
        """
        # NOTE(review): this extracted page count is currently unused — the
        # loop below relies on a hard-coded range instead.
        page_links = response.xpath("//a[@class='page-link']/text()").extract()
        # BUG FIX: guard before indexing; the original `page_links[-2]`
        # raised IndexError when fewer than two pagination links exist.
        last_page = page_links[-2] if len(page_links) >= 2 else None

        # Listing URL pattern: https://www.doutula.com/article/list/?page=N
        for page in range(1, 11):
            per_url = f'https://www.doutula.com/article/list/?page={page}'
            yield scrapy.Request(
                url=per_url,
                callback=self.parse_one_page,
                dont_filter=True,
            )
            # TODO: remove — limits the crawl to one page for testing.
            break

    def parse_one_page(self, response):
        """Extract image name/url pairs from one listing page and yield items."""
        per_page_imgs = response.xpath(
            "//div[@class='random_article']/div[@class='col-xs-6 col-sm-3']"
        )
        for per_img in per_page_imgs:
            imgs_url = per_img.xpath("img/@data-original").extract_first()
            imgs_name = per_img.xpath("p/text()").extract_first()

            # BUG FIX: skip entries missing a URL or a caption — the original
            # crashed on None (None.split(...) / concatenating None into a str).
            if not imgs_url or not imgs_name:
                continue

            item = IvskyspiderItem()
            item['img_name'] = imgs_name
            # Local path the image will be stored under, e.g. images/<name>.jpg
            # (extension taken from the remote URL).
            item['img_uri'] = (
                self.settings['IMAGES_STORE'] + '/' + imgs_name
                + '.' + imgs_url.split(".")[-1]
            )
            # ImagesPipeline expects a *list* of URLs in IMAGES_URLS_FIELD.
            item['img_url'] = [imgs_url]
            yield item
