import scrapy
from selenium import webdriver
import json.encoder


class SidebarSpider(scrapy.Spider):
    """Scrape the sidebar navigation of the uni-app documentation site.

    For each documentation section, extract the sidebar entries — plain
    links and titled sub-menus — and write them to a per-section JSON
    file named after the first path segment of the section URL.
    """

    name = "sidebar"

    def __init__(self):
        super().__init__()
        # NOTE(review): the Selenium browser is opened here but never used
        # by parse(); kept so close() keeps working. Passing the driver
        # path positionally is deprecated in Selenium 4 — confirm the
        # installed version supports it.
        self.browser = webdriver.Chrome(r"D:\plugins\chromedriver_win32\chromedriver.exe")

    def start_requests(self):
        """Yield one request per documentation section landing page."""
        urls = [
            'https://uniapp.dcloud.io/README',
            'https://uniapp.dcloud.io/collocation/pages',
            'https://uniapp.dcloud.io/component/',
            'https://uniapp.dcloud.io/api/',
            'https://uniapp.dcloud.io/uniCloud/'
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response, **kwargs):
        """Extract sidebar items from *response* and dump them to
        '<section>.json', where <section> is the first URL path segment.
        """
        # xpath() returns a (possibly empty) SelectorList, never None,
        # so an emptiness check is the correct guard.
        selectors = response.xpath('.//ul[@class="sidebar-nav-ul"]/li')
        if not selectors:
            return
        items = []
        for elem in selectors:
            title = elem.xpath('.//span/text()').get()
            if title is None:
                # Plain link entry (no <span> heading).
                items.append({
                    'title': elem.xpath('.//a/text()').get(),
                    'path': elem.xpath('.//a/@href').get()
                })
            else:
                # Titled sub-menu: collect its child links as [href, text]
                # pairs (renamed from `list`, which shadowed the builtin).
                children = [
                    [child.xpath('.//a/@href').get(),
                     child.xpath('.//a/text()').get()]
                    for child in elem.xpath('.//ul/li')
                ]
                items.append({
                    'title': title,
                    'children': children
                })
        # BUG FIX: the original computed `filename` but wrote every
        # response to the same constant file name, so each response
        # clobbered the previous one. Use the section name instead.
        filename = response.url.split('/')[3]
        with open(f'{filename}.json', 'w', encoding='utf-8') as f:
            json.dump(items, f, ensure_ascii=False)

    def close(spider, reason):
        """Scrapy shutdown hook: quit the Selenium browser.

        Scrapy calls this with the spider instance as the first argument,
        hence the conventional-for-this-hook `spider` parameter name.
        """
        spider.browser.quit()
