import json
import re
from datetime import datetime

from scrapy import Request
from crawler.core import BaseSpider
from crawler.utils.func_tools import md5, get_public_item
from spiders.electronic_business.matchesfashion.extractors import MatchesFashionMainExtractor, \
    MatchesFashionDetailExtractor

__all__ = ['MatchesFashionMainSpider']


# Initial implementation complete - px3
class MatchesFashionMainSpider(BaseSpider):
    """Spider for MATCHESFASHION category listing + product detail pages (HKG storefront).

    Workflow:
      1. ``parse`` extracts the embedded ``application/json`` payload from a
         listing page, yields one detail request per product, and — on the
         first (un-offset) page only — fans out one request per remaining
         results page.
      2. ``parse_detail`` merges the detail payload into the listing item and
         yields it through ``ElectronicBusinessPipeline``.
    """

    name = "electronic_business_main_MATCHESFASHION"

    redis_key = 'electronic_business_main_MATCHESFASHION'
    default_origin_url = 'https://www.matchesfashion.com/intl/mens/shop/clothing/denim'
    default_origin_cookie = {
        'language': 'en',
        'country': 'HKG',
        'indicativeCurrency': '',
        'billingCurrency': 'EUR',
    }

    default_proxy_type = 'tunnel'

    default_origin_header = {
        'authority': 'www.matchesfashion.com',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
    }

    custom_settings = {
        # 'COOKIES_ENABLED': False,
        'CONCURRENT_REQUESTS': 3,
        # 403 pages (anti-bot) are inspected in the callbacks, so let them through.
        'HTTPERROR_ALLOWED_CODES': [403],
        'ITEM_PIPELINES': {
            'crawler.pipelines.ElectronicBusinessPipeline': 100,
        },
        'DOWNLOADER_MIDDLEWARES': {
            # Proxy
            'crawler.middlewares.proxy.ProxyMiddleware': 200,
            # Custom
            'spiders.electronic_business.matchesfashion.middlewares.CookiesMiddleware': 300,

        }
    }

    # Compiled once at class-creation time (was rebuilt on every response in
    # both callbacks).
    _JSON_PAYLOAD_RE = re.compile(
        r'<script id=".*type="application/json">(.*?)</script></body></html>', re.S)

    def _get_request_url(self, task):
        """Return the listing URL carried in the task payload."""
        return task['info']['url']

    def _extract_payload(self, response):
        """Return the embedded JSON payload text from *response*, or ``None``.

        Logs (instead of ``print``-ing) the body of 403 responses so blocked
        pages can be diagnosed from the spider log.
        """
        if response.status == 403:
            # Likely blocked by anti-bot protection; keep the body for debugging.
            self.logger.warning('HTTP 403 for %s, body: %s', response.url, response.text)
        matches = self._JSON_PAYLOAD_RE.findall(response.text)
        return matches[0] if matches else None

    def parse(self, response, **kwargs):
        """Parse a listing page: yield detail requests, then paginate."""
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        payload = self._extract_payload(response)
        if payload is None:
            return

        for info_dict in MatchesFashionMainExtractor.get_item(response, payload):
            yield Request(
                url=info_dict['url'],
                headers=self.default_origin_header,
                meta={
                    'task': task,
                    'info_dict': info_dict,
                    'proxy_type': self.default_proxy_type,
                }, dont_filter=True,
                callback=self.parse_detail
            )

        # Only the first (un-offset) page fans out pagination requests;
        # follow-up pages carry ``more_page`` in meta so they never re-paginate.
        if not response.meta.get('more_page'):
            total_page = json.loads(payload)["props"]["pageProps"]["searchResults"]["data"]["pagination"][
                "numberOfPages"]
            if total_page and int(total_page) > 1:
                # Page 0 is the current response; request offsets 1..N-1.
                for page in range(1, total_page):
                    yield Request(
                        url=response.request.url + f'?pageOffset={page}',
                        headers=self.default_origin_header,
                        meta={
                            'task': task,
                            'more_page': True,
                            'proxy_type': self.default_proxy_type,
                        }, dont_filter=True,
                        callback=self.parse
                    )

    def parse_detail(self, response):
        """Parse a product detail page and yield the merged item."""
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        info_dict = response.meta['info_dict']
        payload = self._extract_payload(response)
        if payload is None:
            return

        detail = MatchesFashionDetailExtractor.get_item(response, payload)
        info_dict.update(detail)

        info_dict['md5_value'] = md5(info_dict['spu_id'])
        info_dict['origin'] = 'matchesfashion_HKG'
        info_dict['sync'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        item = get_public_item(task, table_name='shedana_data')
        item['item'] = info_dict
        yield item
