import scrapy
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from selenium.webdriver.chrome.service import Service

class ChanneltestSpider(scrapy.Spider):
    """Scrape JD.com category pages.

    Walks the level-4 category anchors on the page and, for each, the
    level-5 sub-category anchors next to it, yielding one item dict per
    level-5 node with its name and absolute link.
    """

    name = 'channeltest'
    allowed_domains = ['channel.jd.com']
    start_urls = ['https://channel.jd.com/1713-3262.html']

    def __init__(self, *args, **kwargs):
        # Forward Scrapy's spider args/kwargs (e.g. `-a` options) instead of
        # swallowing them; the class attribute already supplies `name`.
        super().__init__(*args, **kwargs)
        option = ChromeOptions()
        # `Options.headless = True` is deprecated in Selenium 4; the
        # argument form works across versions.
        option.add_argument('--headless')
        # Raw string: the original 'C:\Program Files\...' relied on invalid
        # escape sequences (\P, \G, \C, \A) — a SyntaxWarning today and a
        # hard error in future Python versions.
        # Selenium 4 removed the positional executable_path argument, so the
        # driver path goes through a Service object.
        self.driver = webdriver.Chrome(
            service=Service(r'C:\Program Files\Google\Chrome\Application\chromedriver.exe'),
            options=option,
        )

    def closed(self, reason):
        """Scrapy shutdown hook: quit the browser so the chromedriver
        process is not leaked when the spider finishes."""
        self.driver.quit()

    def parse(self, response):
        """Extract level-4/level-5 category names and links.

        Yields one fresh dict per level-5 node. (Previously a single
        shared dict was mutated and only printed, so nothing ever reached
        the item pipeline and any collected items would have aliased.)

        :param response: the downloaded category page
        """
        category_class_four_list = response.xpath('//*[@id="app"]//ul/li/a')
        # NOTE(review): the [:1] slices look like debugging limits — only the
        # first node at each level is processed; remove them to scrape all.
        for category_class_four_node in category_class_four_list[:1]:
            category_class_four = category_class_four_node.xpath('./text()').extract_first()
            category_class_five_list = category_class_four_node.xpath('../nav/a')
            for category_class_five_node in category_class_five_list[:1]:
                item = {
                    "category_class_four": category_class_four,
                    "category_class_five": category_class_five_node.xpath('./text()').extract_first(),
                    "category_class_five_link": response.urljoin(
                        category_class_five_node.xpath('./@href').extract_first()
                    ),
                }
                yield item
