# -*- coding: utf-8 -*-
# @Time    : 2024/05/06 14:10
# @Author  : Mr.su
# @FileName: burberry.py
# @FileDesc:
from CollectSpiders.settings import LOG_FILE_PATH
from CollectSpiders.toots.methods import make_md5
from CollectSpiders.toots.connects import RedisClient
import scrapy, json, datetime, urllib.parse, logging, re


# noinspection PyAbstractClass,PyMethodMayBeStatic
class CrawlSpider(scrapy.Spider):
    """Incremental product spider for tw.burberry.com.

    Flow: category landing page -> breadcrumb category ids -> paged JSON
    products API (20 items per page). A Redis set ('products') dedupes
    product URLs across runs; the first already-seen URL stops further
    pagination (assumes the listing is newest-first -- TODO confirm).
    """
    name, domain = 'burberry', 'tw.burberry.com'
    redisClient = RedisClient()
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/123.0.0.0 Safari/537.36',
    }
    custom_settings = {
        'CONCURRENT_REQUESTS': 2,
        'LOG_FILE': LOG_FILE_PATH, 'LOG_LEVEL': 'WARNING'
    }

    def __init__(self, data=None, *args, **kwargs):
        """
        Accept the crawl configuration.

        :param data: URL-quoted JSON string describing the crawl task;
                     when omitted a built-in default configuration is used.
        :param args: forwarded to scrapy.Spider.
        :param kwargs: forwarded to scrapy.Spider.
        """
        super(CrawlSpider, self).__init__(*args, **kwargs)
        self.data = json.loads(urllib.parse.unquote(data)) if data else {
            "pid": "1300", "status": 1, "wait_time": 3600, "brand": "burberry",
            "childs": {
                "1301": {
                    "sid": "1301", "label": "男", "upload": 1, "status": 1, "webtype": "taobao",
                    "url": "https://tw.burberry.com/l/mens-new-arrivals-new-in/"
                }
            }
        }

    def start_requests(self):
        # Only the first configured child column is crawled; skip when its
        # status flag is not 1 (disabled).
        column = list(self.data['childs'].values())[0]
        if column['status'] == 1:
            yield scrapy.Request(
                column['url'], headers=self.headers, callback=self.process_index
            )

    def process_index(self, response):
        """Extract breadcrumb category ids from the landing-page HTML and
        request the first page of the products API.

        Raises json.JSONDecodeError (logged by Scrapy as a parse failure)
        when the breadcrumb JSON is not present in the page.
        """
        cat_datas = json.loads(''.join(re.findall(r'\{"catalogBreadcrumbs":(\[[\s\S]*?\])', response.text)))
        cat_ids = urllib.parse.quote('/'.join(cat_data['id'] for cat_data in cat_datas))
        # Fix: dots escaped so the pattern is anchored to the literal domain
        # (original r'https://tw.burberry.com/...' let '.' match any char).
        if re.match(r'https://tw\.burberry\.com/.*?/.*?/.*?/', response.url):
            # Deep category URLs carry a facet segment (second-to-last path
            # part) that must be forwarded to the products API.
            facets = urllib.parse.quote(response.url.split('/')[-2])
            url = 'https://tw.burberry.com/web-api/pages/products?facets={}&location={}&offset={}&limit=20&language=zh&country=TW'
            # Pre-fill only the facets slot, keeping location/offset templated
            # for the pagination loop in process_lis.
            url = url.format(facets, '{}', '{}')
        else:
            url = 'https://tw.burberry.com/web-api/pages/products?location={}&offset={}&limit=20&language=zh&country=TW'
        yield scrapy.Request(
            url.format(cat_ids, 0), headers=self.headers, callback=self.process_lis,
            meta={'url': url, 'cat_ids': cat_ids, 'pg': 0}
        )

    def process_lis(self, response):
        """Parse one products-API page: emit product dicts, and request the
        next page unless a previously-seen item was encountered."""
        column = list(self.data['childs'].values())[0]
        js = json.loads(response.text)
        has_more = True
        for item in js['data']['products'][0]['items']:
            # sadd returns 0 when the URL hash is already in the Redis set:
            # the item was collected on a previous run, so stop paginating.
            result = self.redisClient.conn.sadd('products', make_md5(item['url']))
            if not result:
                has_more = False
                break
            # Last entry of srcSet is taken as the largest rendition
            # -- presumably widths ascend; verify against a live payload.
            img_lis = [img['sources'][0]['srcSet'].split(', ')[-1].split(' ')[0] for img in item['medias']]
            product = {
                '_id': make_md5(img_lis[0]),
                'pid': self.data['pid'],
                'sid': column['sid'],
                'status': '0',  # 0: not downloaded  1: downloaded  6: download failed
                'webtype': column['webtype'],
                'url': 'https://tw.burberry.com{}'.format(item['url']),
                'time': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                'brand': self.data['brand'],
                'image': img_lis,
                'domain': self.domain,
                'label': column['label'],
                'upload': column['upload'],
                'name': item['content']['title']
            }
            logging.warning('<{}>: 数据id: {}'.format(self.domain, product['_id']))
            yield product
        count = js['data']['productsInfo']['total']
        # Last page index = ceil(count / 20) - 1; (count - 1) // 20 yields the
        # same value for every count >= 0 (and -1 when count == 0, no pages),
        # replacing the original modulo-based two-branch expression.
        pg_count = (count - 1) // 20
        pg = response.meta['pg']
        if has_more and pg < pg_count:
            pg += 1
            url = response.meta['url']
            cat_ids = response.meta['cat_ids']
            yield scrapy.Request(
                url.format(cat_ids, pg * 20), headers=self.headers, callback=self.process_lis,
                meta={'url': url, 'cat_ids': cat_ids, 'pg': pg}
            )
