import scrapy

from PCL.items import PclItem
class PclSpider(scrapy.Spider):
    """Scrape the PCL tutorials index and emit one PclItem per tutorial title.

    The index page groups tutorials under <h1> headings ("big categories");
    each heading is followed by a <blockquote> whose <li> entries are the
    "small categories". Each small-category page is then parsed for the
    tutorial titles listed in its sidebar navigation.
    """

    name = "pcl"
    allowed_domains = ["pcl.readthedocs.io"]
    start_urls = ["https://pcl.readthedocs.io/projects/tutorials/en/master"]

    def parse(self, response):
        """Parse the tutorials index page and follow each small-category link.

        Yields one scrapy.Request per small category, carrying the category
        names and the absolute link in ``meta`` for ``parse_title_list``.
        """
        big_node_list = response.xpath('//h1')
        # Skip the first <h1>: it is the page title, not a category heading.
        for big_node in big_node_list[1:]:
            big_category = big_node.xpath("./text()").extract_first()

            small_node_list = big_node.xpath('./following-sibling::blockquote//li')
            for small_node in small_node_list:
                # extract_first() returns None when the <li> carries no link;
                # urljoin(None) would raise, so skip such entries.
                href = small_node.xpath('./p//@href').extract_first()
                if href is None:
                    continue

                temp = {
                    'big_category': big_category,
                    'small_category': small_node.xpath("./p//text()").extract_first(),
                    'small_category_link': response.urljoin(href),
                }
                yield scrapy.Request(
                    url=temp['small_category_link'],
                    callback=self.parse_title_list,
                    meta=temp,
                )

    def parse_title_list(self, response):
        """Parse one small-category page; return a list of PclItem objects.

        Category context arrives via ``response.meta`` as set by ``parse``.
        Titles are read from the sidebar navigation <li>/<a> elements.
        """
        temp = response.meta

        # NOTE(review): absolute XPath into the theme's nav sidebar — fragile
        # against theme/layout changes; verify against the live page markup.
        job_list = response.xpath('/html/body/div/nav/div/div[2]/div/ul/li')

        items = []
        for job in job_list:
            item = PclItem()
            item['big_category'] = temp['big_category']
            item['small_category'] = temp['small_category']
            item['small_category_link'] = temp['small_category_link']
            item['title'] = job.xpath('./a/text()').extract()
            # Bug fix: the original appended OUTSIDE the loop, so only the
            # last item was kept, and an empty job_list raised NameError.
            items.append(item)
        return items
