import re
import time
from datetime import datetime

from scrapy import Request
from crawler.core import BaseSpider
from crawler.utils.func_tools import md5, get_public_item
from spiders.electronic_business.zappos.extractors import ZapposMainExtractor, ZapposDetailMainExtractor

__all__ = ['ZapposMainSpider']


# TODO: size_stock field is still missing — to be confirmed

class ZapposMainSpider(BaseSpider):
    """Spider for Zappos category listing pages.

    Flow: fetch the category page, fan out one request per pagination page,
    extract the product summary list from the embedded
    ``window.__INITIAL_STATE__`` JSON blob, then follow each product URL and
    merge the detail fields into the final item.
    """

    name = "electronic_business_main_ZAPPOS"

    redis_key = 'electronic_business_main_ZAPPOS'
    default_origin_url = 'https://www.zappos.com/c/the-style-room-women'

    default_origin_request_type = 'FormRequest'
    default_proxy_type = 'abroad'
    default_origin_header = {
        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'accept-language': 'zh-CN,zh;q=0.9',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36',
    }
    custom_settings = {
        'COOKIES_ENABLED': False,
        'CONCURRENT_REQUESTS': 20,
        'ITEM_PIPELINES': {
            'crawler.pipelines.ElectronicBusinessPipeline': 100,
        }
    }

    # Pattern that captures the page-state JSON blob; compiled once here
    # instead of on every response in parse() and parse_detail().
    _initial_state_re = re.compile('window.__INITIAL_STATE__ = (.*?)</script><script>', re.S)

    def _get_request_url(self, task):
        """Return the request URL for a scheduled task dict."""
        return task['url']

    def parse(self, response, **kwargs):
        """Parse a listing page.

        Schedules one request per additional pagination page (only from the
        first page, never from a pagination page itself) and one detail
        request per product found in the embedded state JSON.
        """
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        # Pagination pages must not re-discover pagination, otherwise every
        # page would be scheduled once per page.
        if response.meta.get('more_page'):
            total = []
        else:
            total = response.xpath('//span[@class="yl-z"]/a[last()]//text()').extract()

        if total:
            # FIX: int() instead of eval() — never eval() text scraped from a
            # remote page (arbitrary code execution risk).
            total_page = int(total[0])
            if total_page > 1:
                # FIX: pick the query separator by whether the URL already
                # carries a query string. The old endswith('/') test appended
                # a bare '&p=...' with no preceding '?', yielding an invalid
                # URL for trailing-slash bases.
                sep = '&' if '?' in self.default_origin_url else '?'
                for page in range(1, total_page):
                    url = f'{self.default_origin_url}{sep}p={page}'
                    yield Request(
                        url=url,
                        headers=self.default_origin_header,
                        meta={
                            'task': task,
                            'more_page': True,
                            'proxy_type': self.default_proxy_type
                        },
                        callback=self.parse,
                        dont_filter=True
                    )
        js_text = self._initial_state_re.findall(response.text)
        if js_text:
            info_list = ZapposMainExtractor.get_item(response, js_text[0])
            for info_dict in info_list:
                yield Request(
                    url=info_dict['url'],
                    headers=self.default_origin_header,
                    meta={
                        'task': task,
                        'info_dict': info_dict,
                        'proxy_type': self.default_proxy_type
                    },
                    callback=self.parse_detail,
                    dont_filter=True
                )

    def parse_detail(self, response):
        """Parse a product detail page and yield the merged item.

        Merges the detail fields extracted from the embedded state JSON into
        the summary dict carried in ``meta['info_dict']``, stamps identity
        (md5 of spu_id), origin and sync time, and wraps it in the public
        item envelope. Yields nothing when the state blob is absent.
        """
        self.logger.info(f'Request End, URL: {response.url}')
        info_dict = response.meta['info_dict']
        task = response.meta['task']
        js_text = self._initial_state_re.findall(response.text)
        if js_text:
            detail_info_dict = ZapposDetailMainExtractor.get_item(response, js_text[0])
            info_dict.update(detail_info_dict)
            info_dict['md5_value'] = md5(info_dict['spu_id'])
            info_dict['origin'] = 'zappos_USA'
            info_dict['sync'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
            item = get_public_item(task, table_name='shedana_data')
            item['item'] = info_dict
            yield item
