import scrapy
from scrapy_redis.spiders import RedisSpider
from scrapy.utils.request import request_fingerprint
import hashlib
from scrapy.utils.serialize import ScrapyJSONEncoder
from scrapy.selector import Selector
import json, re, random
import logging
from urllib.parse import quote, unquote
from curl_cffi import requests as cffi_requests
from fake_useragent import UserAgent
from Icrawler9.tools import utils, configs
from Icrawler9 import settings
from Icrawler9.items import DatasItem


logger = logging.getLogger(__name__)


class DigikeySpider(RedisSpider):
    """Redis-fed spider for www.digikey.com product-detail pages.

    Tasks arrive on the ``digikey:tasks`` Redis list as
    ``'<url>|||<extra>'`` strings.  Each detail page embeds all product
    data in its ``__NEXT_DATA__`` JSON blob, which :meth:`parse_data`
    turns into a ``DatasItem``.  Failed downloads are re-queued by
    :meth:`errback_response` (scrapy's own retry is disabled).
    """
    name = 'digikey'
    allowed_domains = []
    # Redis keys: the task queue doubles as the error queue; rds_fp is the
    # scrapy-redis dupefilter set (fingerprints are removed on failure so
    # the re-queued URL is not filtered out).
    rds_tasks = '{}:tasks'.format(name)
    rds_fp = '{}:dupefilter'.format(name)
    rds_errs = rds_tasks
    redis_key = rds_tasks
    default_serialize = ScrapyJSONEncoder().encode
    host = 'https://www.digikey.com'
    # search_host = 'https://www.digikey.com/en/products/result?keywords={}'
    avail_url = 'https://www.digikey.com/products/api/v5/lead-time/{}/{}'
    # Copy the shared data-platform settings, then pin this spider to a
    # single-request, no-retry profile (errback_response re-queues instead).
    custom_settings = dict(configs.dataplatform_settings)
    # custom_settings['REDIS_ITEMS_KEY']= '{}:items'.format(name)
    # custom_settings['REDIRECT_ENABLED']= False
    # custom_settings['RETRY_HTTP_CODES'] = [500]
    # custom_settings['HTTPERROR_ALLOWED_CODES'] = [200, 403, 429]
    custom_settings['CONCURRENT_REQUESTS'] = 1
    custom_settings['CONCURRENT_REQUESTS_PER_DOMAIN'] = 1
    # custom_settings['DOWNLOAD_DELAY'] = 0.5
    custom_settings['DOWNLOAD_TIMEOUT'] = 60
    custom_settings['RETRY_TIMES'] = 0
    custom_settings['ITEM_PIPELINES'] = {
        # 'Icrawler9.pipelines.RedisPipelines.RedisPipeline': 100,
        # 'Icrawler9.pipelines.RedisPipelines.DataplatformDupPipeline': 200,
        # 'Icrawler9.pipelines.RedisPipelines.PinDupPipeline': 200,
        'Icrawler9.pipelines.mongodbPipeline': 400,
        # 'Icrawler9.pipelines.dbPipelines.MySQLPipeline': 400,
        # 'Icrawler9.pipelines.excelpipelines.ExcelPintopinPipeline': 500,
    }
    custom_settings['DOWNLOADER_MIDDLEWARES'] = {
        # 'Icrawler9.middlewares.downloaderMiddlewares.RequestsMiddleware': 400,
        # 'Icrawler9.middlewares.downloaderMiddlewares.CffiRequestMiddleware': 400,
        'Icrawler9.middlewares.downloaderMiddlewares.ByPassJa3RequestMiddleware': 400,
        # 'Icrawler9.middlewares.downloaderMiddlewares.ToScrapyRespMiddleware': 400,
        # 'Icrawler9.middlewares.downloaderMiddlewares.RandomUserAgent': 200,
        # 'Icrawler9.middlewares.downloaderMiddlewares.ProxyMiddleware': 300,
    }
    config = {
        'sources': 'digikey',
        'creator': 'gxq',
    }
    headers = {
        'authority': 'www.digikey.com',
        'accept': '*/*',
        'accept-language': 'zh-CN,zh;q=0.9',
        'authorization': 'Bearer',
        'cache-control': 'no-cache',
        'lang': 'en',
        'pragma': 'no-cache',
        'request-context': 'appId=cid-v1:40371992-8794-4ad9-9011-4552f68fdb07',
        'request-id': '|9931708efd18447e9ce81c088e8e7939.de18271e86f9433c',
        'site': 'us',
        'traceparent': '00-9931708efd18447e9ce81c088e8e7939-de18271e86f9433c-01',
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36',
        'x-currency': 'USD',
        'x-request-id': 'd962c1b4-ecca-4834-9eb6-33fad8f629c4',
    }
    # NOTE(review): duplicate of default_serialize — kept for compatibility.
    serialize = ScrapyJSONEncoder().encode
    proxies = utils.get_proxys(configs.proxy)

    @staticmethod
    def _text(container, key):
        """Return the stripped string at ``container[key]``, or '' when missing/falsy."""
        value = container.get(key)
        return value.strip() if value else ''

    def make_requests_from_url(self, task):
        """Build the detail-page request for one Redis task.

        *task* is ``'<url>|||<extra>...'``; only the first segment is used.
        ``/products/detail/`` paths are normalised onto ``self.host`` so
        relative or foreign-host detail links still resolve.
        """
        raw_url = task.split('|||')[0]
        if re.search(r'\/products\/detail\/', raw_url):
            url = self.host + '/en/products/detail/' + raw_url.split('/products/detail/')[-1]
        else:
            url = raw_url
        # self.headers['user-agent'] = UserAgent()['random']
        yield scrapy.Request(url, headers=self.headers, callback=self.parse_data, errback=self.errback_response)

    def parse_data(self, response):
        """Parse one product-detail page into a ``DatasItem``.

        All product data lives in the page's ``__NEXT_DATA__`` JSON blob;
        pages without a valid blob or without a manufacturer part number
        yield nothing.
        """
        logger.info('%s %s', response.status, response.url)
        data_reg = '//*[@id="__NEXT_DATA__"]//text()'
        datasjs = ''.join([dx.strip() for dx in response.xpath(data_reg).extract() if dx.strip()])
        if not utils.check_json_format(datasjs):
            return
        datas = json.loads(datasjs)
        props = datas.get('props') or dict()
        pageProps = props.get('pageProps') or dict()
        envelope = pageProps.get('envelope') or dict()
        data = envelope.get('data') or dict()
        productOverview = data.get('productOverview') or dict()
        manufacturerProductNumber = self._text(productOverview, 'manufacturerProductNumber')
        if not manufacturerProductNumber:
            return
        manufacturer = self._text(productOverview, 'manufacturer')
        manufacturerUrl = self._text(productOverview, 'manufacturerUrl')
        # logo/desc were only ever filled by the (disabled) manufacturer-page
        # fetch below; they stay empty but are kept for schema compatibility.
        manufacturer_url = manufacturer_logo = manufacturer_desc = ''
        if manufacturerUrl:
            manufacturer_url = manufacturerUrl if re.search(r'^http', manufacturerUrl) else self.host + manufacturerUrl
            # try:
            #     sess = cffi_requests.Session()
            #     sess.proxies = utils.get_proxys(self.proxy)
            #     manufacturer_resp = sess.get(manufacturer_url, impersonate=random.choice(configs.impersonates), timeout=30)
            #     print('manufacturer_resp status: ', manufacturer_resp.status_code)
            #     if manufacturer_resp.status_code == 200:
            #         manufacturer_logo, manufacturer_desc = self.parse_manufacturer(manufacturer_resp)
            # except:
            #     pass
        manufacturers = {
            'manufacturer_name': manufacturer,
            'manufacturer_url': manufacturer_url,
            'manufacturer_logo': manufacturer_logo,
            'manufacturer_desc': manufacturer_desc,
        }
        description = self._text(productOverview, 'description')
        productAttributes = data.get('productAttributes') or dict()
        attributes = self._parse_attributes(productAttributes)
        categories = [self._text(categorie_, 'label')
                      for categorie_ in (productAttributes.get('categories') or list())]
        img_urls = list()
        for cm in (data.get('carouselMedia') or list()):
            displayUrl = cm.get('displayUrl')
            if displayUrl:
                img_urls.append(displayUrl if re.search(r'^http', displayUrl) else 'https:' + displayUrl)
        datasheet_urls = list()
        datasheet_url = productOverview.get('datasheetUrl') or ''
        if datasheet_url:
            if not re.search(r'^http', datasheet_url):
                datasheet_url = 'https:' + datasheet_url
            datasheet_urls.append(datasheet_url)
        priceQuantity = data.get('priceQuantity') or dict()
        stocks = {'total': priceQuantity.get('qtyAvailable') or '0'}
        prices = self._parse_prices(priceQuantity)
        rohs_val = self._rohs_value(data.get('environmental') or dict())
        # pbfree_code is not present in the page data; kept for schema compatibility.
        other_attributes = {
            'rohs_val': rohs_val,
            'pbfree_code': '',
            'product_status': self._text(attributes, 'Product Status') or self._text(attributes, 'Part Status'),
            'series': self._text(attributes, 'Series'),
            'package': self._text(attributes, 'Package / Case'),
            'encapsulation': self._text(attributes, 'Packaging') or self._text(attributes, 'Package'),
            'stocks': stocks,
        }
        substitutes = data.get('substitutes') or dict()
        pins = self.parse_pins(substitutes, manufacturerProductNumber, manufacturer, description, response.url)
        item = DatasItem()
        item['code'] = manufacturerProductNumber
        item["description"] = description
        item['category'] = categories
        item['manufacturers'] = manufacturers
        item['manufacturer_name'] = manufacturer
        item["img_urls"] = img_urls
        item["datasheet_urls"] = datasheet_urls
        item["prices"] = prices
        item["attributes"] = attributes
        item["other_attributes"] = other_attributes
        item["all_json"] = data
        item['pins'] = pins
        item["url"] = response.url
        item["sources"] = self.config.get('sources')
        yield item

    def _parse_attributes(self, productAttributes):
        """Flatten the ``attributes`` rows into a ``{label: value}`` dict.

        Multi-valued rows are space-joined; repeated labels are joined with
        '; '.  Rows whose ``value`` entry is missing/None are skipped (the
        previous ``.get('value').strip()`` crashed on those).
        """
        attributes = dict()
        for attribute_ in (productAttributes.get('attributes') or list()):
            label = self._text(attribute_, 'label')
            parts = [(av.get('value') or '').strip() for av in (attribute_.get('values') or list())]
            value = ' '.join([p for p in parts if p])
            if not attributes.get(label):
                attributes[label] = value
            else:
                attributes[label] = '; '.join([attributes[label], value])
        return attributes

    @staticmethod
    def _parse_prices(priceQuantity):
        """Normalise every pricing tier into a flat price-break dict.

        Digikey emits two tier schemas (``pricingTiers``/``mergedPricingTiers``,
        ``breakQty``/``brkQty``, ``extendedPrice``/``extPrice``); both are accepted.
        Tiers without a break quantity are dropped.
        """
        prices = list()
        for pc in (priceQuantity.get('pricing') or list()):
            packaging = (pc.get('packaging') or '').strip()
            tiers = pc.get('pricingTiers') or pc.get('mergedPricingTiers') or list()
            for tier in tiers:
                if not tier:
                    continue
                qty = tier.get('breakQty') or tier.get('brkQty') or ''
                if not qty:
                    continue
                prices.append({
                    "breakQty": qty,
                    "unitPrice": tier.get('unitPrice') or '',
                    "extendedPrice": tier.get('extendedPrice') or tier.get('extPrice') or '',
                    "packagingType": packaging,
                })
        return prices

    @staticmethod
    def _rohs_value(environmental):
        """Return the 'RoHS Status' cell from the environmental table, or ''.

        Rows with fewer than two cells are skipped — the original indexed
        ``dataCells[0]``/``[1]`` blindly and raised IndexError on short rows.
        """
        for row in (environmental.get('dataRows') or []):
            dataCells = row.get('dataCells') or []
            if len(dataCells) < 2:
                continue
            label_value = (dataCells[0].get('data') or dict()).get('value') or dict()
            label = label_value.get('value') or ''
            if 'RoHS Status'.lower() in label.lower():
                cell_value = (dataCells[1].get('data') or dict()).get('value') or dict()
                return (cell_value.get('value') or '').strip()
        return ''

    def parse_pins(self, substitutes, code, manufacturer_name, description, url):
        """Extract substitute ("pin") parts from the substitutes table.

        Args:
            substitutes: the ``substitutes`` sub-dict of the page data
                (``dataHeaders`` + ``dataRows``).
            code, manufacturer_name, description, url: identity of the
                product the substitutes belong to.

        Returns:
            list[dict]: one record per substitute row that has a part number.
        """
        pins = list()
        keys = [dh.strip() for dh in (substitutes.get('dataHeaders') or [])]
        # enumerate (not keys.index) so duplicate headers map the right slot;
        # headers that match nothing keep their original stripped text.
        for ind, key in enumerate(keys):
            normalized = re.sub(r'\s+|\n|\r|\t', ' ', key.lower())
            if normalized == 'Part No.'.lower():
                keys[ind] = 'pin_code'
            elif 'Manufacturer'.lower() in normalized:
                keys[ind] = 'pin_manufacturer_name'
            elif 'Substitute Type'.lower() in normalized:
                keys[ind] = 'level_str'
        for dataRow in (substitutes.get('dataRows') or []):
            vals = []
            for dataCell in (dataRow.get('dataCells') or []):
                dc = dataCell.get('data') or dict()
                value = dc.get('value') or dict()
                val = value.get('value') or ''
                label = value.get('label') or ''
                if label:
                    href = value.get('url') or ''
                    if href:
                        # Use a local name here: the original rebound the *url*
                        # parameter, corrupting code_url/url for later rows.
                        cell_url = href if re.search(r'^http', href) else self.host + href
                        val = label + '|||' + cell_url
                    else:
                        val = label
                vals.append(val.strip())
            if len(vals) != len(keys):
                continue
            itm = dict(zip(keys, vals))
            pcu = (itm.get('pin_code') or '').strip()
            pin_code = pcu.split('|||')[0].strip()
            if not pin_code:
                continue
            pin_code_url = pcu.split('|||')[1] if '|||' in pcu else ''
            pins.append({
                'code': code,
                'manufacturer_name': manufacturer_name,
                'description': description,
                'code_url': url,
                'pin_code': pin_code,
                'pin_manufacturer_name': (itm.get('pin_manufacturer_name') or '').strip(),
                'pin_description': '',
                "level_str": (itm.get('level_str') or '').strip(),
                'pin_code_url': pin_code_url,
                'url': url,
            })
        return pins

    def errback_response(self, failure):
        """On download failure, drop the dupefilter fingerprint and re-queue the URL.

        Combined with ``RETRY_TIMES = 0`` this makes Redis the single retry
        mechanism for this spider.
        """
        fp = request_fingerprint(failure.request)
        self.server.srem(self.rds_fp, fp)
        self.server.rpush(self.rds_tasks, failure.request.url)