import scrapy


from scrapy_redis.spiders import RedisSpider
from selenium import webdriver
from selenium.webdriver import ChromeOptions

class JdredisSpider(RedisSpider):
    """Distributed JD.com category/commodity spider driven by a Redis queue.

    Start URLs are pushed onto the ``jd`` Redis key by an external producer;
    allowed domains are supplied at crawl time, e.g.
    ``scrapy crawl jdredis -a domain=jd.com``.
    """

    name = 'jdredis'
    # Redis list the scrapy-redis scheduler pops start URLs from.
    redis_key = 'jd'

    def __init__(self, *args, **kwargs):
        """Read the comma-separated ``domain`` argument and start headless Chrome.

        ``filter(None, ...)`` drops empty fragments produced by leading/trailing
        commas or an absent ``domain`` argument.
        """
        domain = kwargs.pop('domain', '')
        self.allowed_domains = list(filter(None, domain.split(',')))
        option = ChromeOptions()
        option.headless = True  # render pages without a visible browser window
        # BUG FIX: the path must be a raw string — the original plain literal
        # contained invalid escape sequences ('\P', '\G', '\C', '\A').
        # NOTE(review): passing the executable path positionally is deprecated
        # in Selenium 4 (use a Service object) — confirm installed version.
        self.driver = webdriver.Chrome(
            r'C:\Program Files\Google\Chrome\Application\chromedriver.exe',
            options=option,
        )
        super(JdredisSpider, self).__init__(*args, **kwargs)

    def closed(self, reason):
        """BUG FIX: quit Chrome when the crawl ends.

        Scrapy invokes ``closed`` automatically on spider shutdown; previously
        the driver process leaked on every run.
        """
        self.driver.quit()

    def parse(self, response):
        """Walk the three-level category tree on the JD home page and schedule
        a request for every third-level category listing page.
        """
        category_class_one_list = response.xpath(
            '//div[2]/div[1]/div[2]/div[1]/div/div[1]/h2/span')
        for category_class_one_node in category_class_one_list:
            category_class_one = category_class_one_node.xpath(
                './text()').extract_first()
            # Second-level <a> nodes live under the following sibling of the
            # heading's grandparent <div>.
            category_class_two_list = category_class_one_node.xpath(
                '../../following-sibling::div/div[3]/dl/dt/a')
            for category_class_two_node in category_class_two_list:
                category_class_two = category_class_two_node.xpath(
                    './text()').extract_first()
                category_class_two_link = response.urljoin(
                    category_class_two_node.xpath('./@href').extract_first())
                # Third-level links are the <dd>/<a> siblings of the <dt>.
                category_class_three_list = category_class_two_node.xpath(
                    '../../dd/a')
                for category_class_three_node in category_class_three_list:
                    category_class_three = category_class_three_node.xpath(
                        './text()').extract_first()
                    category_class_three_link = response.urljoin(
                        category_class_three_node.xpath('./@href').extract_first())
                    temp = {
                        'category_class_one': category_class_one,
                        'category_class_two': category_class_two,
                        'category_class_two_link': category_class_two_link,
                        'category_class_three': category_class_three,
                        'category_class_three_link': category_class_three_link,
                    }
                    yield scrapy.Request(
                        url=temp['category_class_three_link'],
                        callback=self.parse_category_class_four,
                        meta={"class_three": temp},
                    )

    def parse_category_class_four(self, response):
        """Extract every commodity on a third-level category listing page.

        BUG FIX: the original mutated the single shared ``meta`` dict on each
        iteration and never yielded, so no items ever reached the item
        pipelines. Each commodity now gets an independent copy of the category
        context and is yielded.
        """
        base = response.meta["class_three"]
        commodity_list = response.xpath('//*[@id="J_goodsList"]/ul/li/div')
        for commodity in commodity_list:
            item = dict(base)  # fresh copy so yielded items stay independent
            item['commodity_name'] = commodity.xpath(
                './div[3]/a/em/text()').extract_first()
            item['commodity_author'] = commodity.xpath(
                './div[4]/span[1]/a/text()').extract_first()
            item['commodity_link_detail'] = response.urljoin(
                commodity.xpath('./div[3]/a/@href').extract_first())
            item['commodity_price'] = commodity.xpath(
                './div[2]/strong/i/text()').extract_first()
            yield item

