import json
import re
from datetime import datetime

from scrapy import Request
from crawler.core import BaseSpider
from crawler.utils.func_tools import md5, get_public_item
from spiders.electronic_business.lv.extractors import LOUMainExtractor

__all__ = ['LouMainSpider']


class LouMainSpider(BaseSpider):
    """Spider for Louis Vuitton (eng-gb) category listing pages.

    Crawl flow per task:
      1. ``start_task``   -> page 0 of the category listing API
      2. ``parse``        -> extract products, fan out remaining pages
      3. ``parse_detail`` -> per-SKU delivery-date API (availability gate)
      4. ``parse_html``   -> product detail HTML (description / size)
      5. ``parse_image``  -> image API, then yields the pipeline item
    """

    name = "electronic_business_main_LV"

    redis_key = 'electronic_business_main_LV'
    default_origin_url = 'https://fr.louisvuitton.com/eng-gb/cadeaux/cadeaux-pour-lui/_/N-t143cszt'

    default_proxy_type = 'tunnel'

    # Static browser-like headers sent with every API request.
    # NOTE(review): 'authority'/'origin'/'referer' point at the CN site while
    # the requests target the eng-gb API — inherited as-is, confirm intended.
    default_origin_header = {
        'authority': 'api-www.lv.cn',
        'accept': 'application/json, text/plain, */*',
        'accept-language': 'zh-CN,zh;q=0.9',
        'client_id': '607e3016889f431fb8020693311016c9',
        'client_secret': '60bbcdcD722D411B88cBb72C8246a22F',
        'origin': 'https://www.louisvuitton.cn',
        'referer': 'https://www.louisvuitton.cn/zhs-cn/bags/for-women/twist/_/N-t1wyh5qg?page=0',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36',
    }

    custom_settings = {
        'CONCURRENT_REQUESTS': 3,
        'ITEM_PIPELINES': {
            'crawler.pipelines.ElectronicBusinessPipeline': 100,
        }
    }

    # Endpoint templates (kept in one place instead of inlined per-method).
    LISTING_URL = 'https://api.louisvuitton.com/eco-eu/search-merch-eapi/v1/eng-gb/plp/products/{}?page={}'
    DELIVERY_URL = 'https://api.louisvuitton.com/api/eng-gb/checkout/shipping/sku/{}/delivery-date'
    SKU_URL = 'https://api.louisvuitton.com/api/eng-gb/catalog/skus/{}/low'

    def start_task(self, task):
        """Entry point: request page 0 of the listing for the task's URI."""
        uri = task['info']['uri']
        yield Request(
            url=self.LISTING_URL.format(uri, 0),
            headers=self.default_origin_header,
            meta={
                'proxy_type': self.default_proxy_type,
                'task': task,
                'uri': uri,
            },
            dont_filter=True,
            callback=self.parse,
        )

    def _detail_requests(self, response, task):
        """Yield one delivery-date request per product on a listing page.

        Shared by ``parse`` and ``parse_more_page`` (the two bodies were
        previously duplicated verbatim). The delivery-date endpoint acts as
        an availability gate before the product HTML is fetched.
        """
        for info_dict in LOUMainExtractor.get_item(response):
            yield Request(
                url=self.DELIVERY_URL.format(info_dict['spu_id']),
                headers=self.default_origin_header,
                meta={
                    'task': task,
                    'info_dict': info_dict,
                    'proxy_type': self.default_proxy_type,
                },
                dont_filter=True,
                callback=self.parse_detail,
            )

    def parse(self, response, **kwargs):
        """Handle listing page 0: extract products and fan out pagination."""
        self.logger.info(f'Request End, URL: {response.url}')
        uri = response.meta['uri']
        task = response.meta['task']
        # 'cfmd' appearing in the payload is used as a validity marker.
        # NOTE(review): heuristic inherited from the original — confirm.
        if 'cfmd' in response.text:
            yield from self._detail_requests(response, task)

        # `.get()` returns None when 'nbPages' is absent; `or 0` prevents a
        # TypeError from `None` in the range() below.
        total_page = json.loads(response.text).get('nbPages') or 0
        for page in range(1, total_page):
            yield Request(
                url=self.LISTING_URL.format(uri, page),
                headers=self.default_origin_header,
                meta={
                    'proxy_type': self.default_proxy_type,
                    'task': task,
                },
                callback=self.parse_more_page,
                dont_filter=True,
            )

    def parse_more_page(self, response):
        """Handle listing pages 1..n-1: extract products only, no pagination."""
        self.logger.info(f'Request End, URL: {response.url}')
        if 'cfmd' in response.text:
            yield from self._detail_requests(response, response.meta['task'])

    def parse_detail(self, response):
        """Forward available products to their HTML detail page.

        Products whose payload lacks 'estimatedDeliveryDateLabel' are
        silently dropped (treated as not deliverable). The label value
        itself is not consumed anywhere downstream, so it is not parsed.
        """
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        info_dict = response.meta['info_dict']
        if 'estimatedDeliveryDateLabel' in response.text:
            yield Request(
                url=info_dict['url'],
                headers=self.default_origin_header,
                meta={
                    'task': task,
                    'info_dict': info_dict,
                    'proxy_type': self.default_proxy_type,
                },
                dont_filter=True,
                callback=self.parse_html,
            )

    def parse_html(self, response):
        """Scrape the product description from HTML, then request images."""
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        info_dict = response.meta['info_dict']
        description_parts = response.xpath(
            '//div[@class="lv-product-detailed-features__description"]//text()').getall()

        # Stock quantity is hard-coded: the public site exposes no counts.
        info_dict['size_stock'] = [{'size': info_dict.pop('size'), 'stock': 3}]
        info_dict['product_des'] = ','.join(description_parts)
        yield Request(
            # Bug fix: the original URL literal carried a leading space.
            url=self.SKU_URL.format(info_dict['spu_id']),
            headers=self.default_origin_header,
            meta={
                'task': task,
                'info_dict': info_dict,
                'proxy_type': self.default_proxy_type,
            },
            dont_filter=True,
            callback=self.parse_image,
        )

    def parse_image(self, response):
        """Collect image URLs, stamp item metadata and yield the final item."""
        self.logger.info(f'Request End, URL: {response.url}')
        task = response.meta['task']
        info_dict = response.meta['info_dict']
        image_list = json.loads(response.text)['skuList'][0]['image']
        # 'contentUrl' is itself a template with named placeholders.
        info_dict['atlas'] = [
            image['contentUrl'].format(IMG_WIDTH=400, IMG_HEIGHT=400)
            for image in (image_list or [])
        ]
        info_dict['origin'] = 'lv_Uk'
        # Dedup key: hash of SKU id + origin marker.
        info_dict['md5_value'] = md5(info_dict['spu_id'] + info_dict['origin'])
        info_dict['sync'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        item = get_public_item(task, table_name='shedana_data')
        item['item'] = info_dict
        yield item