import json
import math
import traceback
from datetime import datetime

from scrapy import Request
from crawler.core import BaseSpider
from crawler.utils.func_tools import get_public_item, md5

# NOTE: testing not yet complete
__all__ = ['SelfRidgesMainSpider']

from spiders.electronic_business.selfridges.extractors import SelfRidgesMainExtractor, SelfRidgesDetailExtractor


class SelfRidgesMainSpider(BaseSpider):
    """Selfridges product-listing spider.

    Crawl flow:
        parse           -> read the record total, fan out one request per page
        parse_html      -> extract listing items, request each item's image set
        get_image_info  -> parse the scene7 JSONP image-set payload into `atlas`
        parse_detail    -> extract the product description / composition
        total           -> attach size/stock data and yield the final item
    """

    name = "electronic_business_main_SELFRIDGES"
    redis_key = 'electronic_business_main_SELFRIDGES'

    # NOTE(review): placeholder URL -- the real listing URL is presumably
    # injected by the cookies/proxy middleware or task meta; confirm.
    default_origin_url = 'https://www.baidu.com'
    default_proxy_type = 'tunnel'
    default_proxy_demote = False
    default_origin_header = {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/109.0.0.0 Safari/537.36',
    }
    custom_settings = {
        'CONCURRENT_REQUESTS': 4,
        'ITEM_PIPELINES': {
            'crawler.pipelines.ElectronicBusinessPipeline': 100,
        },
        'DOWNLOADER_MIDDLEWARES': {
            # proxy rotation
            'crawler.middlewares.proxy.ProxyMiddleware': 200,
            # site-specific cookie handling
            'spiders.electronic_business.selfridges.middlewares.CookiesMiddleware': 300,
        },
    }

    # Common prefix of every selfridges scene7 image URL.
    _IMG_PREFIX = 'https://images.selfridges.com/is/image/'

    def parse(self, response, **kwargs):
        """Read `recordSetTotal` from the listing JSON and emit one request
        per result page (60 records per page)."""
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']

        total = json.loads(response.text)
        total_page = math.ceil(int(total['recordSetTotal']) / 60)

        for page in range(1, total_page + 1):
            yield Request(
                url=self.default_origin_url,
                headers=self.default_origin_header,
                method='GET',
                meta={
                    'page': str(page),
                    'task': task,
                    'proxy_type': self.default_proxy_type,
                },
                callback=self.parse_html,
                dont_filter=True,
            )

    def parse_html(self, response):
        """Extract listing items and request each item's image-set JSONP."""
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        for res in SelfRidgesMainExtractor.get_item(response):
            yield Request(
                url=res['image_url'],
                headers=self.default_origin_header,
                meta={
                    'info_dict': res,
                    'task': task,
                },
                dont_filter=True,
                callback=self.get_image_info,
            )

    def _collect_img_urls(self, nodes):
        """Build image URLs from an iterable of scene7 'item' nodes.

        A node either nests another set (``node['set']['item']``, dict or
        list) or carries the image name directly at ``node['i']['n']``.
        Nodes matching neither shape are logged and skipped.
        """
        urls = []
        for node in nodes:
            try:
                if node.get('set'):
                    inner = node['set']['item']
                    if isinstance(inner, list):
                        urls.extend(self._IMG_PREFIX + leaf['i']['n'] for leaf in inner)
                    else:
                        urls.append(self._IMG_PREFIX + inner['i']['n'])
                else:
                    urls.append(self._IMG_PREFIX + node['i']['n'])
            except Exception:
                # Node shapes vary across products; skip what we can't parse.
                self.logger.exception('Failed to parse scene7 image node')
        return urls

    def get_image_info(self, response):
        """Parse the scene7 JSONP image-set payload into ``info_dict['atlas']``
        and request the product detail page."""
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        info_dict = response.meta['info_dict']

        # Payload looks like: /*jsonp*/getImgSet({...},"");  -- some
        # responses use the truncated callback name 'getImgSe'.
        marker = '/*jsonp*/getImgSet(' if 'getImgSet' in response.text else '/*jsonp*/getImgSe('
        json_text = json.loads(response.text.split(marker)[1].split(',"");')[0])

        item_node = json_text.get('set', {}).get('item')
        if isinstance(item_node, dict):
            img_url_list = self._collect_img_urls(item_node['set']['item'])
        elif isinstance(item_node, list):
            img_url_list = self._collect_img_urls(item_node)
        else:
            img_url_list = []

        # Deduplicate; discard() fixes the original list.remove(), which
        # raised ValueError whenever the bare-prefix placeholder was absent.
        url_set = set(img_url_list)
        url_set.discard('https://images.selfridges.com/is/image/selfridges/')
        info_dict['atlas'] = list(url_set)

        # Request the detail page.
        yield Request(
            url=info_dict['url'],
            headers=self.default_origin_header,
            meta={
                'task': task,
                'info_dict': info_dict,
            },
            callback=self.parse_detail,
            dont_filter=True,
        )

    def parse_detail(self, response):
        """Extract the product description/composition and request stock data."""
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        info_dict = response.meta['info_dict']

        # Hoisted: the extractor was previously invoked twice per response.
        detail = SelfRidgesDetailExtractor.get_item(response)
        product_des = ','.join(detail) if isinstance(detail, list) else detail
        info_dict['product_des'] = product_des

        # Composition = comma-separated fragments mentioning a percentage
        # (e.g. "80% cotton"); both ASCII and fullwidth percent signs occur.
        if product_des:
            parts = [p for p in product_des.split(',') if p and ('%' in p or '％' in p)]
        else:
            parts = []
        info_dict['composition'] = ','.join(parts)

        del info_dict['image_url']
        info_dict['md5_value'] = md5(info_dict['spu_id'])
        yield Request(
            url=info_dict['stock_url'],
            headers=self.default_origin_header,
            meta={
                'info_dict': info_dict,
                'task': task,
            },
            dont_filter=True,
            callback=self.total,
        )

    def total(self, response):
        """Attach per-size stock, finalize metadata, and yield the item."""
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        info_dict = response.meta['info_dict']

        # Keep only sizes that are actually purchasable.
        info_dict['size_stock'] = [
            {'size': s['value'], 'stock': s['Stock Quantity Available to Purchase']}
            for s in json.loads(response.text)['stocks']
            if s['Stock Quantity Available to Purchase'] != 0
        ]
        del info_dict['stock_url']
        info_dict['origin'] = 'selfridges_HKG'
        info_dict['sync'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        item = get_public_item(task, table_name='shedana_data')
        item['item'] = info_dict

        # An empty description means the detail request failed; drop the item.
        if info_dict['product_des']:
            yield item
