import re
import json
from datetime import datetime

from scrapy import Request
from crawler.core import BaseSpider
from crawler.utils.func_tools import md5, get_public_item
from spiders.electronic_business.mrporter.extractors import MrporterMainExtractor
from spiders.electronic_business.mrporter.extractors.extractor import MrporterImageMainExtractor

__all__ = ['MrporterMainSpider']


# Initial implementation complete
class MrporterMainSpider(BaseSpider):
    """Spider for MR PORTER (mrporter.com) men's clothing listings.

    Flow: category listing page -> one request per pagination page ->
    one request per product detail page. Product details are read from
    the ``window.state`` JSON blob embedded in each detail page, matched
    against the SPU id collected from the listing.
    """

    name = "electronic_business_main_MRPORTER"

    redis_key = 'electronic_business_main_MRPORTER'
    default_origin_url = 'https://www.mrporter.com/en-us/mens/clothing'

    default_origin_request_type = 'FormRequest'
    default_proxy_type = 'tunnel'
    default_proxy_demote = False
    default_origin_header = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
    }
    custom_settings = {
        'COOKIES_ENABLED': False,
        'CONCURRENT_REQUESTS': 3,
        'ITEM_PIPELINES': {
            'crawler.pipelines.ElectronicBusinessPipeline': 100,
        }
    }

    # Maps the spelled-out quantity in a stock badge (e.g. "Only two left")
    # to a numeric stock count.
    size_dict = {
        'one': 1,
        'two': 2,
        'three': 3,
        'four': 4,
        'five': 5
    }

    # Embedded state JSON on product detail pages; compiled once, reused
    # for every response.
    _STATE_RE = re.compile('<script>window.state=(.*?)</script>', re.S)

    def _get_request_url(self, task):
        """Return the request URL stored in the task payload."""
        return task['url']['href']

    def parse(self, response, **kwargs):
        """Read the total page count from the listing page and enqueue one
        GET request per pagination page, each handled by ``parse_html``.
        """
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        total_href = response.xpath('//div[@class="Pagination7__counter"]/span//text()').extract()
        if not total_href:
            return

        # Counter text reads e.g. "1 of 12"; the last token is the total
        # page count.
        total_page = total_href[0].split(' ')[-1]
        if not total_page.isdigit():
            self.logger.warning(f'Unparsable page counter {total_href[0]!r}, URL: {response.url}')
            return

        for page in range(1, int(total_page) + 1):
            url = response.request.url + f'?pageNumber={page}'
            yield Request(
                url=url,
                headers=self.default_origin_header,
                method='GET',
                meta={
                    'task': task,
                    'proxy_type': self.default_proxy_type
                },
                callback=self.parse_html,
                dont_filter=True
            )

    def parse_html(self, response):
        """Extract product summaries from a listing page and enqueue one
        detail-page request per product, carrying the summary in ``meta``.
        """
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        result = MrporterMainExtractor.get_item(response)

        for info_dict in result:
            url = info_dict['url']
            if not url:
                # Listing rows without a detail link cannot be crawled.
                continue
            yield Request(
                url=url,
                headers=self.default_origin_header,
                method='GET',
                meta={
                    'task': task,
                    'info_dict': info_dict,
                    'proxy_type': self.default_proxy_type
                },
                callback=self.parse_detail,
                dont_filter=True
            )

    def parse_detail(self, response):
        """Parse a product detail page: match the colour variant whose part
        number equals the listing's ``spu_id``, enrich ``info_dict`` with
        composition, colour, title, images and per-size stock, then yield
        the pipeline item.
        """
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        info_dict = response.meta['info_dict']

        state_match = self._STATE_RE.search(response.text)
        if not state_match:
            # Anti-bot page or site redesign: no embedded state JSON.
            self.logger.warning(f'No window.state JSON found, URL: {response.url}')
            return
        json_text = json.loads(state_match.group(1))

        for info in json_text['pdp']['detailsState']['response']['body']['products'][0]['productColours']:
            if info['partNumber'] != info_dict['spu_id']:
                continue

            details = info['detailsAndCare']
            # The composition segment is the dash-separated part containing
            # a percentage sign (e.g. "100% cotton"); absent for some items.
            compositions = [seg for seg in details.split('-') if '%' in seg]
            info_dict['composition'] = compositions[0] if compositions else ''
            info_dict['product_des'] = details.replace('-', '')
            info_dict['color'] = info['labelEN']
            info_dict['title'] = info['shortDescription']
            info_dict['atlas'] = [
                'http:' + info['imageTemplate'].format(view=view, width='400')
                for view in info['imageViews']
            ]

            ss_list = list()
            for sku in info['sKUs']:
                if not sku['buyable']:
                    continue
                # Default stock when there is no badge or it is unparsable.
                stock = 10
                if sku.get("badges"):
                    try:
                        # Badge label e.g. "Only two left" -> second word
                        # maps to a count via size_dict.
                        stock = self.size_dict[sku['badges'][0]['label'].split(' ')[1].lower()]
                    except (KeyError, IndexError):
                        stock = 10
                ss_list.append({
                    'size': sku['size']['centralSizeLabel'],
                    'stock': stock,
                })
            info_dict['size_stock'] = ss_list
            break

        info_dict['md5_value'] = md5(info_dict['spu_id'])
        info_dict['origin'] = 'mrporter_HKG'
        info_dict['sync'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        item = get_public_item(task, table_name='shedana_data')
        item['item'] = info_dict
        yield item
