# -*- coding: utf-8 -*-
# @Time    : 2024/11/14 15:49
# @Author  : Mr.su
# @FileName: net-a-porter.py
# @FileDesc:
from CollectSpiders.settings import LOG_FILE_PATH
from CollectSpiders.toots.methods import make_md5
import scrapy, json, datetime, urllib.parse, logging
from CollectSpiders.toots.connects import RedisClient


# noinspection PyAbstractClass,PyMethodMayBeStatic
# noinspection PyAbstractClass,PyMethodMayBeStatic
class CrawlSpider(scrapy.Spider):
    """Collect product list pages and detail pages from net-a-porter.

    Paginates the category search API (60 items/page), then visits each
    product detail page to extract image URLs before yielding the item.
    """
    name, domain = 'net-a-porter', 'www.net-a-porter.com'
    redisClient = RedisClient()
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/130.0.0.0 Safari/537.36',
    }
    custom_settings = {
        'CONCURRENT_REQUESTS': 2,
        'LOG_FILE': LOG_FILE_PATH, 'LOG_LEVEL': 'WARNING'
    }

    # API returns at most 60 products per page; used for pagination math.
    PAGE_SIZE = 60

    def __init__(self, data=None, *args, **kwargs):
        """
        Accept a custom crawl configuration.

        :param data: URL-quoted JSON string describing the task
                     (pid / brand / childs columns). Falls back to a
                     built-in sample config when omitted.
        :param args: forwarded to scrapy.Spider
        :param kwargs: forwarded to scrapy.Spider
        """
        super(CrawlSpider, self).__init__(*args, **kwargs)
        self.data = json.loads(urllib.parse.unquote(data)) if data else {
            "pid": "2300", "status": 1, "wait_time": 3600, "brand": "net-a-porter",
            "childs": {
                "2301": {
                    "sid": "2301", "label": "服装", "upload": 1, "status": 1, "webtype": "taobao",
                    "url": "https://www.net-a-porter.com/zh-sg/shop/%E5%93%81%E7%89%8C/alaia?pageNumber=2"
                }
            }
        }
        # Only the first child column is crawled per spider run.
        self.column = list(self.data['childs'].values())[0]

    def start_requests(self):
        """Issue the first listing-API request for the configured column."""
        start_url = 'https://www.net-a-porter.com/api/nap/search/resources/store/nap_sg/productview/byCategory?attrs=true&category={}&locale=zh_CN&pageNumber={}&pageSize=60'
        # Rebuild the category path ('/品牌/alaia') as a %2F-encoded segment
        # for the API's `category` query parameter.
        url_format = '%2F' + '%2F'.join(self.column['url'].split('?')[0].split('/')[-2:])
        if self.column['status'] == 1:
            # FIX: meta was commented out, but process_lis unconditionally
            # reads meta['pg'] / meta['start_url'] / meta['url_format'] —
            # the first listing response raised KeyError without it.
            yield scrapy.Request(
                start_url.format(url_format, 1), headers=self.headers, callback=self.process_lis,
                meta={'start_url': start_url, 'url_format': url_format, 'pg': 1}
            )

    def process_lis(self, response):
        """Parse one listing-API page: yield detail requests + next page.

        :param response: JSON listing response carrying pagination state in
                         meta ('start_url', 'url_format', 'pg').
        """
        js = json.loads(response.text)
        # ceil(recordSetTotal / PAGE_SIZE) without importing math.
        pg_count = -(-js['recordSetTotal'] // self.PAGE_SIZE)
        for product in js['products']:
            item = {
                '_id': product['productId'],
                'pid': self.data['pid'],
                'sid': self.column['sid'],
                'status': '0',  # 0: not downloaded  1: downloaded  6: download failed
                'webtype': self.column['webtype'],
                'url': 'https://www.net-a-porter.com/zh-sg/shop/product{}'.format(product['seo']['seoURLKeyword']),
                'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                'brand': self.data['brand'],
                'image': [],
                'domain': self.domain,
                # FIX: label/upload were shifted ('label' got webtype,
                # 'upload' got label); map each column key to its own field.
                'label': self.column['label'],
                'upload': self.column['upload'],
                'name': product['shortDescription']
            }
            yield scrapy.Request(
                item['url'], headers=self.headers, callback=self.process_detail,
                meta={'product': item}
            )
        # Follow pagination until the computed last page.
        if response.meta['pg'] < pg_count:
            pg = response.meta['pg'] + 1
            start_url = response.meta['start_url']
            yield scrapy.Request(
                start_url.format(response.meta['url_format'], pg), headers=self.headers, callback=self.process_lis,
                meta={'start_url': start_url, 'url_format': response.meta['url_format'], 'pg': pg}
            )

    def process_detail(self, response):
        """Extract carousel image URLs for the item and yield it.

        :param response: detail-page HTML response; meta['product'] carries
                         the partially-built item from process_lis.
        """
        product = response.meta['product']
        product['image'] = ['https:' + i for i in response.xpath('//*[@class="ImageCarousel88__track"]//li//noscript/img/@src').extract()]
        # FIX: guard against an empty image list (page layout change /
        # blocked response) — previously product['image'][0] raised
        # IndexError; keep the productId-based _id as fallback.
        if product['image']:
            product['_id'] = make_md5(product['image'][0])
        logging.warning('<{}>: 数据id: {}'.format(self.domain, product['_id']))
        yield product
