import random
from typing import Iterable

import scrapy
from scrapy import Request

from ..items import JdshoeproItem

# Default request headers: a desktop Chrome UA plus a referer from the JD list
# domain so requests look like in-site navigation.
headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36 Chrome 17.0 – MAC",
    "referer": "https://list.jd.com/",
}

# You can prepare several cookie strings, put them in a list, and pick one at random:
# cookies_list = []
# temp = random.choice(cookies_list)

# Paste your JD cookie string here (the raw "k1=v1; k2=v2; ..." value).
temp = "此处填写你的JD_Cookies"

# Parse the raw cookie string into a dict for scrapy.Request(cookies=...).
# - split('=', 1): only split on the FIRST '=' so values that themselves
#   contain '=' (e.g. base64 padding) are preserved intact; the original
#   split('=')[-1] kept only the last fragment of such values.
# - strip(): the '; ' separator leaves a leading space on every key.
# - the `if pair.strip()` guard drops empty fragments from a trailing ';'.
cookies = {
    pair.split('=', 1)[0].strip(): pair.split('=', 1)[-1].strip()
    for pair in temp.split(";")
    if pair.strip()
}

class JdshoespiderSpider(scrapy.Spider):
    """Spider for JD's shoe category listing page.

    Scrapes name/price/size from each product card on the listing page, then
    follows every product's detail page to add the intro image, the parameter
    table, and the SKU (货号).
    """

    name = "jdshoespider"
    # allowed_domains = ["www.jd.com"]
    # start_urls = ["https://list.jd.com/list.html?cat=1318,12099,9756"]
    start_urls = ["https://list.jd.com/list.html?cat=1318,12099,9756","https://www.jd.com"]

    def start_requests(self):
        """Issue the initial listing-page request with the session cookies.

        Only start_urls[0] is fetched; the second entry is kept for reference.
        """
        yield scrapy.Request(
            url=self.start_urls[0],
            callback=self.parse,
            cookies=cookies,
        )

    def parse(self, response):
        """Yield one detail-page Request per product card on the listing page.

        The partially-filled item travels to the detail callback via
        ``meta['temp']``.
        """
        # All product nodes on the current page
        products_node = response.xpath("//div[@id='J_goodsList']/ul/li")
        for product_node in products_node:
            # Product title
            product_name = product_node.xpath(".//div/a[@target='_blank']//em/text()").extract_first()
            # Product price
            product_price = product_node.xpath("./div/div/strong/i/text()").extract_first()
            # Product sizes/specs
            product_size = product_node.xpath(".//div[@class='ps-wrap']/ul[@class='ps-main']/li/a/@title").extract()
            # Detail-page URL. extract_first() returns None when the card has
            # no link (e.g. ad placeholders); the original code then crashed on
            # `'https:' not in None` — skip such cards instead.
            product_details_url = product_node.xpath(".//div/a[@target='_blank']/@href").extract_first()
            if not product_details_url:
                continue
            # JD hrefs are protocol-relative ("//item.jd.com/..."); prefix them.
            if 'https:' not in product_details_url:
                product_details_url = "https:" + product_details_url
            # Partial item handed to the detail-page callback.
            item = {
                'title': product_name,
                'price': product_price,
                'size': product_size,
            }
            yield scrapy.Request(
                url=product_details_url,
                callback=self.pares_product_details,
                cookies=cookies,
                meta={'temp': item},
            )

    # NOTE(review): "pares" is a typo for "parse"; kept to avoid breaking any
    # external reference to this method name.
    def pares_product_details(self, response):
        """Enrich the item from ``parse`` with image, parameter dict and SKU."""
        # Partial item passed along from the listing page.
        temp = response.meta['temp']
        # Product intro image; may be absent, and hrefs are protocol-relative.
        img_url = response.xpath('//div[@id="spec-list"]//li[@class="img-hover"]/img/@src').extract_first()
        if img_url and "https:" not in img_url:
            img_url = "https:" + img_url
        # Detail sections containing the parameter table.
        details_node = response.xpath('//div[@class="detail"]')
        for detail in details_node:
            details = detail.xpath('.//div[@class="p-parameter"]//text()').extract()
            details = [i.strip() for i in details if i.strip()]
            # Lines shaped "key：value" become [key, value]; others stay strings.
            detail_list = [text.split("：") if "：" in text else text for text in details]
            de_dict = {}
            for index, value in enumerate(detail_list):
                if isinstance(value, list):
                    if value[1] != '':
                        de_dict[value[0]] = value[1]
                    elif index + 1 < len(detail_list):
                        # "key：" with its value on the following line; the
                        # bounds check avoids an IndexError when the bare key
                        # is the last extracted line.
                        de_dict[value[0]] = detail_list[index + 1]
            # .get() instead of [] so a missing SKU row yields None rather
            # than raising KeyError and dropping the whole item.
            temp['sku'] = de_dict.get("货号")
            temp['details'] = de_dict
            temp['img_url'] = img_url
            yield temp



