# -*- coding: utf-8 -*-
# @Time    : 2024/04/01 09:18
# @Author  : Mr.su
# @FileName: __init__.py.py
# @FileDesc:
from CollectSpiders.settings import LOG_FILE_PATH
from CollectSpiders.toots.methods import make_md5
import scrapy, json, datetime, urllib.parse, logging
from CollectSpiders.toots.connects import RedisClient


# noinspection PyAbstractClass,PyMethodMayBeStatic
class CrawlSpider(scrapy.Spider):
    """Spider that collects new-arrival product listings from gucci.com.

    Crawl flow: for every enabled child column in ``self.data`` it pages
    through the site's JSON product-grid endpoint, de-duplicates product
    URLs via a Redis set, and follows each unseen product to its detail
    page, where the final item is assembled and yielded.
    """
    name, domain = 'gucci', 'www.gucci.com'
    redisClient = RedisClient()
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
    }
    custom_settings = {
        'CONCURRENT_REQUESTS': 2,
        'LOG_FILE': LOG_FILE_PATH, 'LOG_LEVEL': 'WARNING'
    }

    def __init__(self, data=None, *args, **kwargs):
        """
        Accept the crawl configuration as a spider argument.

        :param data: URL-encoded JSON string describing the crawl job
                     (pid, brand, and a ``childs`` mapping of column
                     configurations). When omitted, a built-in test
                     configuration is used.
        :param args: forwarded to scrapy.Spider.
        :param kwargs: forwarded to scrapy.Spider.
        """
        super(CrawlSpider, self).__init__(*args, **kwargs)
        self.data = json.loads(urllib.parse.unquote(data)) if data else {
            "pid": "1100", "status": 1, "wait_time": 3600, "brand": "测试",
            "childs": {
                "1101": {
                    "sid": "1101", "label": "男", "upload": 1, "status": 1, "webtype": "taobao",
                    "url": "https://www.gucci.com/hk/en_gb/ca/whats-new/new-in/this-week-men-c-new-men"
                }, "1102": {
                    "sid": "1102", "label": "女", "upload": 1, "status": 1, "webtype": "taobao",
                    "url": "https://www.gucci.com/hk/en_gb/ca/whats-new/new-in/this-week-women-c-new-women"
                }}
        }

    def start_requests(self):
        """Kick off page 0 of the product grid for every enabled column.

        Fix: previously only the first child column was crawled
        (``list(...)[0]``) even though the configuration defines several
        columns, each carrying its own ``status`` flag.
        """
        column_url = 'https://www.gucci.com/hk/en_gb/c/productgrid?categoryCode={}&show=Page&page={}'
        for column in self.data['childs'].values():
            if column['status'] != 1:
                continue  # column disabled in the configuration
            # The category code is the part of the landing-page URL after '-c-'.
            column_name = column['url'].split('-c-')[-1]
            yield scrapy.Request(
                column_url.format(column_name, 0), headers=self.headers, callback=self.process_lis,
                meta={'column_name': column_name, 'column_url': column_url, 'page': 0, 'column': column}
            )

    def process_lis(self, response):
        """Parse one JSON product-grid page.

        Yields a detail-page request for every product URL not yet seen in
        the Redis 'products' set, and a request for the next page (up to
        page 9) while new products keep appearing.
        """
        column = response.meta['column']
        js = json.loads(response.text)
        has_more = True
        for product in js['products']['items']:
            item = {
                '_id': '',
                'pid': self.data['pid'],
                'sid': column['sid'],
                'status': '0',  # 0: not downloaded  1: downloaded  6: download failed
                'webtype': column['webtype'],
                'url': 'https://www.gucci.com/hk/en_gb/{}'.format(product['productLink']),
                'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                'brand': self.data['brand'],
                'image': [],
                'domain': self.domain,
                'label': column['label'],
                'upload': column['upload'],
                'name': ''
            }
            # SADD returns 0 when the member already exists: we have reached
            # previously-collected products, so stop paging this column.
            result = self.redisClient.conn.sadd('products', make_md5(item['url']))
            if not result:
                has_more = False
                break
            yield scrapy.Request(
                item['url'], headers=self.headers, callback=self.process_detail, meta={'item': item}
            )
        if has_more:
            pg = response.meta['page'] + 1
            if pg < 10:  # hard cap: never crawl past page 9 per column
                column_url = response.meta['column_url']
                column_name = response.meta['column_name']
                yield scrapy.Request(
                    column_url.format(column_name, pg), headers=self.headers, callback=self.process_lis,
                    meta={'column_name': column_name, 'column_url': column_url, 'page': pg, 'column': column}
                )

    def process_detail(self, response):
        """Parse a product detail page and yield the completed item."""
        product = response.meta['item']
        img_lis = response.xpath('//*[@class="carousel-inner slick-theme-gucci"]/div/picture/source[1]/@srcset').extract()
        product['image'] = ['https:' + i for i in img_lis if i.strip()]
        # Fix: the first-image lookup raised IndexError when the carousel
        # xpath matched nothing; fall back to hashing the product URL.
        product['_id'] = make_md5(product['image'][0] if product['image'] else product['url'])
        # Fix: extract_first() returns None when the node is missing, which
        # made ''.join(...) raise TypeError; default to '' instead (for a
        # present node this is identical to the old join-of-one-string).
        product['name'] = response.xpath('//*[@class="productnameandprice-container-standard"]/h1/text()').extract_first(default='')
        logging.warning('<{}>: 数据id: {}'.format(self.domain, product['_id']))
        yield product
