import time

import scrapy
import re
from loguru import logger
from scrapy import cmdline
from scrapy.http import HtmlResponse
from scrapy_redis import spiders
from amazon.items import AmazonItem


class AmazonCommoditySpider(spiders.Spider):
    """scrapy-redis spider that walks Amazon's category menu, gathers every
    product ASIN per category, and finally scrapes each product detail page.

    Flow: start_requests -> parse (category ids) -> parse_species_commodity
    (paginated ASIN collection) -> extract_commodity (per-product item).
    """

    name = "amazon_commodity"

    redis_key = "amazon_commodity"
    # NOTE(review): class-level mutable state is per-process only; as the TODO
    # below says, it must move into Redis before this is truly distributed.
    commodity_catalog = set()  # every Amazon category id ("bbn"), ~22 of them
    commodities = set()  # data-asin (product id) of every product discovered
    try_commodities = dict()  # per-product count of "robot check" re-requests
    robot_max_retry_times = 2  # max re-requests per product on robot check

    def start_requests(self):
        # Fetch the hamburger-menu AJAX payload that lists every category (bbn).
        commodity_catalog_url = "https://www.amazon.com/nav/ajax/hamburgerMainContent?ajaxTemplate=hamburgerMainContent&pageType=Gateway&hmDataAjaxHint=1&navDeviceType=desktop&isSmile=0&isPrime=0&isBackup=false&hashCustomerAndSessionId=9265450a4a37ea1ecc6f3377d5c592346f0ab06a&languageCode=zh_CN&environmentVFI=AmazonNavigationCards%2Fdevelopment%40B6289328400-AL2_aarch64&secondLayerTreeName=prm_digital_music_hawkfire%2Bkindle%2Bandroid_appstore%2Belectronics_exports%2Bcomputers_exports%2Bsbd_alexa_smart_home%2Barts_and_crafts_exports%2Bautomotive_exports%2Bbaby_exports%2Bbeauty_and_personal_care_exports%2Bwomens_fashion_exports%2Bmens_fashion_exports%2Bgirls_fashion_exports%2Bboys_fashion_exports%2Bhealth_and_household_exports%2Bhome_and_kitchen_exports%2Bindustrial_and_scientific_exports%2Bluggage_exports%2Bmovies_and_television_exports%2Bpet_supplies_exports%2Bsoftware_exports%2Bsports_and_outdoors_exports%2Btools_home_improvement_exports%2Btoys_games_exports%2Bvideo_games_exports%2Bgiftcards%2Bamazon_live%2BAmazon_Global&customerCountryCode=HK"
        yield scrapy.Request(commodity_catalog_url, callback=self.parse)

    async def parse(self, response: HtmlResponse, **kwargs):
        """Extract every category id (bbn) and schedule page 1 of each."""
        # FIX: meta.get() instead of meta['proxy'] — no KeyError when a
        # request went out without a proxy middleware setting the key.
        logger.info(f"状态码:{response.status} 访问:{response.request.url}, 使用ip:{response.request.meta.get('proxy')}")
        # assumes response.json()['data'] is a string to regex over — TODO confirm
        json_data = response.json()['data']
        # FIX: raw-string pattern (avoids invalid-escape SyntaxWarning) and
        # \d+ instead of \d* (a bare "bbn=" would otherwise add '' to the set).
        self.commodity_catalog = set(re.findall(r"bbn=(\d+)", json_data))  # all category ids
        species_commodity_url = "https://www.amazon.com/s?i=specialty-aps&bbn={}&page={}&language=zh"
        logger.info(f"商品种类id获取成功!!! 商品ID总数目:{len(self.commodity_catalog)}, 商品bbn:{self.commodity_catalog}")
        self.commodity_catalog = {'7581691011'}  # TODO for debug: restrict crawl to one category
        # Walk every category from its first result page.
        for bbn in self.commodity_catalog:
            yield scrapy.Request(species_commodity_url.format(bbn, 1), callback=self.parse_species_commodity, dont_filter=True)

    # Collect every product id (data-asin) of the current category into self.commodities.
    def parse_species_commodity(self, response: HtmlResponse, **kwargs):
        # for debug: dump the raw page so parsing failures can be inspected
        # NOTE(review): crashes if ../parse_debug does not exist — confirm the dir is pre-created
        with open(f"../parse_debug/response_{time.time()}.txt", "w", encoding="utf-8") as f:
            f.write(response.body.decode('utf-8'))

        # Grab every product id (data-asin) on the current result page.
        goods = response.xpath('//div[@class="sg-col-20-of-24 s-result-item s-asin sg-col-0-of-12 sg-col-16-of-20 sg-col s-widget-spacing-small sg-col-12-of-16"]')
        for good in goods:
            self.commodities.add(good.attrib['data-asin'])
        # Recover bbn (category) and page number from the request URL.
        request_url = response.request.url  # FIX: typo "reqeust_url"
        bbn = re.findall(r"bbn=(\d+)", request_url)[0]  # FIX: raw-string, \d+
        page = int(re.findall(r"page=(\d+)", request_url)[0]) + 1  # next page to fetch
        logger.info(f"当前商品ID:{bbn};第{page}页数据;Amazon总商品数量{len(self.commodities)}")  # TODO 分布式!∴你的commodities最终要变为存储在Redis中才能实现共享
        species_commodity_url = "https://www.amazon.com/s?i=specialty-aps&bbn={}&page={}&language=zh"
        have_next_page = response.xpath('//a[@class="s-pagination-item s-pagination-next s-pagination-button s-pagination-button-accessibility s-pagination-separator"]')

        debug_flag = True
        # TODO for debug begin: cap the crawl at ~100 products
        if len(self.commodities) >= 100:
            debug_flag = False
        # debug end

        # More pages left: keep paginating this category.
        if have_next_page and debug_flag:
            yield scrapy.Request(species_commodity_url.format(bbn, page), callback=self.parse_species_commodity, dont_filter=True)
        # Exhausted: drop the category from the pending set.
        else:
            # FIX: discard() — a duplicate late response (dont_filter=True)
            # would make remove() raise KeyError on an already-removed bbn.
            self.commodity_catalog.discard(bbn)
            # Once the last category finishes, fan out over every product page.
            if len(self.commodity_catalog) == 0:
                print("len(self.commodities)", len(self.commodities))
                commodity_url = "https://www.amazon.com/-/zh/dp/{}"
                for data_asin in self.commodities:
                    logger.info(f"商品{data_asin}正在解析...")
                    yield scrapy.Request(commodity_url.format(data_asin), callback=self.extract_commodity, dont_filter=True)

    # Parse one product detail page into an AmazonItem.
    def extract_commodity(self, response: HtmlResponse, **kwargs):
        item = AmazonItem()
        item['url'] = response.request.url
        item['title'] = (response.xpath('//span[@id="productTitle"]/text()').extract_first() or "").strip()
        # Try each known price xpath in order; first hit wins, else empty.
        item['price'] = next((response.xpath(one_xpath).extract_first()
                              for one_xpath in AmazonItem.price_xpath_list
                              if response.xpath(one_xpath)), "")
        # Re-issue the request to get past Amazon's "robot check" page.
        asin = response.request.url.split('/')[-1]  # product id (FIX: was misnamed "bbn")
        # A missing title means the robot-check page was served; retry while
        # under the limit. FIX: strict "<" — "<=" allowed robot_max_retry_times+1
        # retries instead of the documented maximum.
        if not item['title'] and self.try_commodities.get(asin, 0) < self.robot_max_retry_times:
            self.try_commodities[asin] = self.try_commodities.get(asin, 0) + 1
            yield scrapy.Request(response.request.url, callback=self.extract_commodity, dont_filter=True)
        else:
            yield item


if __name__ == '__main__':
    # Run this spider through Scrapy's CLI when the module is executed directly.
    cmdline.execute(["scrapy", "crawl", "amazon_commodity"])

# <class 'set'> 22 {'16225015011', '16225005011', '256643011', '16225017011',
# '16225020011', '16225007011', '16225006011', '16225010011', '16225014011',
# '16225008011', '16225016011', '16225018011', '16225012011', '16225013011',
# '2562090011', '16225019011', '16225011011', '2238192011', '4954955011',
# '7581691011', '16225021011', '16225009011'}

# ['B0CBWZWYK9', 'B0CJQJ8GLH', 'B0D8B9KKZR', 'B00YBQX3I0',
# 'B0DL8YDPVX', 'B01DWNI30K', 'B01DWNFZ4C', 'B00YBQTU82',
# 'B01DDG437I', 'B09J4QRBTX', 'B084VCPR1N', 'B0DJ27JS55',
# 'B0D5Y95LL6', 'B08YPBX28C', 'B0DLGGZJK1', 'B00YBQSIMQ',
# 'B0D476TZ24', 'B01H0IVBPC', 'B0DK2ZNX69', 'B07XQFMJN4',
# 'B0D57XBHHY', 'B01DWNIL0W', 'B00YBQTOE2', 'B00IY888E0',
# 'B000QIWFYU', 'B0D6VX5DS5', 'B07477VKB9', 'B0CTWD1ZZJ',
# 'B0B2TGDLQ4', 'B092FR8HKP', 'B08G4Y3MLM', 'B00LXZHKTA',
# 'B00XR701CU', 'B0DKNLXR9G', 'B0B4YTTZ1S', 'B093BG5SVP',
# 'B0BQWGGB5L', 'B083GY5KP1', 'B0CPCRVVZ4', 'B0CQS6LYPZ']
