import json
import re
from datetime import datetime

from scrapy import Request
from crawler.core import BaseSpider
from crawler.utils.func_tools import get_public_item, md5
from spiders.electronic_business.ssense.extractors import SsenseMainExtractor, SsenseDetailExtractor

__all__ = ['SsenseMainSpider']


# Initial version complete

class SsenseMainSpider(BaseSpider):
    """Spider for SSENSE (en-hk storefront) product listings.

    Crawls a category listing page, follows its pagination, visits each
    product's detail page, and yields one merged item per product.  All
    parsing reads the JSON blob embedded in the page's
    ``window.INITIAL_STATE`` script tag.
    """

    name = "electronic_business_main_SSENSE"

    redis_key = 'electronic_business_main_SSENSE'
    default_origin_url = 'https://www.ssense.com/en-hk/men'
    default_proxy_type = 'tunnel'
    default_proxy_demote = False
    default_origin_header = {
        'authority': 'www.ssense.com',
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
    }

    custom_settings = {
        # 'COOKIES_ENABLED': False,
        'CONCURRENT_REQUESTS': 3,
        'HTTPERROR_ALLOWED_CODES': [403],
        'RETRY_HTTP_CODES': [500, 502, 503, 504, 400, 407, 408, 418, 429, 414],
        'ITEM_PIPELINES': {
            'crawler.pipelines.ElectronicBusinessPipeline': 100,
        },
        'DOWNLOADER_MIDDLEWARES': {
            # proxy
            'crawler.middlewares.proxy.ProxyMiddleware': 200,
            # custom
            'spiders.electronic_business.ssense.middlewares.CookiesMiddleware': 300,
        }
    }

    # Captures the JSON payload embedded in the window.INITIAL_STATE
    # <script> tag.  Compiled once here instead of inline in each of the
    # three parse callbacks.
    _INITIAL_STATE_RE = re.compile('<script>window.INITIAL_STATE=(.*?)</script><script', re.S)

    def _get_request_url(self, task):
        """Return the listing-page URL stored in the scheduled task."""
        return task['info']['url']

    def _extract_initial_state(self, response):
        """Return the raw INITIAL_STATE JSON text from *response*, or None."""
        matches = self._INITIAL_STATE_RE.findall(response.text)
        return matches[0] if matches else None

    def _detail_requests(self, response, task, res_text):
        """Yield one detail-page Request per product on a listing page.

        Shared by :meth:`parse` and :meth:`parse_more_page`, which
        previously duplicated this loop verbatim.
        """
        for info_dict in SsenseMainExtractor.get_item(response, res_text):
            yield Request(
                url=info_dict['url'],
                headers=self.default_origin_header,
                meta={
                    'task': task,
                    'info_dict': info_dict,
                    'proxy_type': self.default_proxy_type,
                },
                dont_filter=True,
                callback=self.parse_detail,
            )

    def parse(self, response, **kwargs):
        """Parse the first listing page: queue detail pages and pagination."""
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        res_text = self._extract_initial_state(response)
        if res_text is None:
            return
        yield from self._detail_requests(response, task, res_text)
        total_page = json.loads(res_text)["products"]["paginationInfo"]["totalPages"]
        if total_page:
            # Coerce once: the original mixed int(total_page) with bare
            # total_page + 1, which would crash on a numeric string.
            total_page = int(total_page)
            for page in range(2, total_page + 1):
                # NOTE(review): assumes response.url carries no query
                # string of its own — confirm against task URLs.
                yield Request(
                    url=response.url + f"?page={page}",
                    headers=self.default_origin_header,
                    meta={
                        'task': task,
                        # Fix: pagination requests previously omitted
                        # proxy_type, so pages 2+ bypassed the tunnel
                        # proxy used by every other request here.
                        'proxy_type': self.default_proxy_type,
                    },
                    dont_filter=True,
                    callback=self.parse_more_page,
                )

    def parse_more_page(self, response):
        """Parse a paginated listing page and queue its detail pages."""
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        res_text = self._extract_initial_state(response)
        if res_text is not None:
            yield from self._detail_requests(response, task, res_text)

    def parse_detail(self, response):
        """Parse a product detail page and yield the merged item.

        Merges detail-page attributes (color, images, sizes/stock,
        composition, description) into the listing-page ``info_dict``
        carried in the request meta.
        """
        self.logger.info(f'Request End, URL: {response.url}')
        info_dict = response.meta['info_dict']
        task = response.meta['task']
        res_text = self._extract_initial_state(response)
        if res_text is None:
            return
        stock_size_list, atlas_list, composition, product_des, color = \
            SsenseDetailExtractor.get_item(response, res_text)

        info_dict['color'] = color
        info_dict['atlas'] = atlas_list
        info_dict['size_stock'] = stock_size_list
        info_dict['composition'] = composition
        info_dict['product_des'] = product_des
        info_dict['md5_value'] = md5(info_dict['spu_id'])
        info_dict['origin'] = 'ssense_HKG'
        info_dict['sync'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        item = get_public_item(task, table_name='shedana_data')
        item['item'] = info_dict
        yield item