import scrapy
from lxml import etree


class InformationSpider(scrapy.Spider):
    """Crawl shangerdi.com comic listings.

    Pipeline: all-categories page -> each category -> every listing page of
    that category -> each comic's detail page -> one item per chapter, holding
    the category name, comic name, chapter name and chapter URL.
    """

    name = "information"
    allowed_domains = ["shangerdi.com"]

    def start_requests(self):
        """Seed the crawl with the "all categories" listing page."""
        urls = ["https://www.shangerdi.com/sort/quanbu-allvisit-0-1/"]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse1)

    def parse1(self, response):
        """Extract every category link and schedule its first listing page."""
        doc = etree.HTML(response.text)
        # [1:] skips the first <li> — presumably the "all" pseudo-category that
        # the start URL already covers; confirm against the live page.
        category_list = doc.xpath(
            "//div[@class='class-block clearfix']/ul[@class='class-block-item']/li"
        )[1:]
        for category in category_list:
            category_name = category.xpath(".//a/text()")[0]
            category_url = category.xpath(".//a/@href")[0]
            # urljoin resolves relative hrefs against response.url and leaves
            # absolute URLs unchanged, so this is backward compatible.
            yield scrapy.Request(
                url=response.urljoin(category_url),
                callback=self.parse2,
                meta={'category': category_name},
            )

    def parse2(self, response):
        """Read the category's total page count and schedule every page."""
        category_name = response.meta['category']
        # Listing URLs end in ".../<page>/"; this is page 1, so stripping the
        # trailing "1/" leaves a prefix we can append any page number to.
        category_url = response.url[:-2]
        doc = etree.HTML(response.text)
        page_nums = int(doc.xpath("//a[@class='last']/text()")[0])
        self.logger.info('%s has %d pages', category_name, page_nums)
        # Pages are numbered 1..page_nums (the start URL ends in "-1/").
        # The previous range(page_nums) requested a bogus page 0 and skipped
        # the final page — classic off-by-one.
        for page_num in range(1, page_nums + 1):
            detail_every_page_url = f"{category_url}{page_num}/"
            yield scrapy.Request(
                url=detail_every_page_url,
                callback=self.parse3,
                meta={'category': category_name, 'page_num': page_num},
            )

    def parse3(self, response):
        """Extract each comic on a listing page and schedule its detail page."""
        category_name = response.meta['category']
        # Distinct names for document vs. loop element: the original shadowed
        # the parsed tree with the per-item variable.
        doc = etree.HTML(response.text)
        for cartoon in doc.xpath("//ul[@class='cartoon-block-box clearfix']/li"):
            cartoon_name = cartoon.xpath(".//div[@class='cart-info']//a/text()")[0]
            cartoon_url = cartoon.xpath(".//div[@class='cart-item']/a/@href")[0]
            self.logger.debug('found %s at %s', cartoon_name, cartoon_url)
            yield scrapy.Request(
                url=response.urljoin(cartoon_url),
                callback=self.parse4,
                meta={'cartoon_name': cartoon_name, 'category_name': category_name},
            )

    def parse4(self, response):
        """Yield one item per chapter found on a comic's detail page."""
        category_name = response.meta['category_name']
        cartoon_name = response.meta['cartoon_name']
        doc = etree.HTML(response.text)
        for chapter in doc.xpath("//ul[@class='chapter-list mt20 clearfix']/li"):
            chapter_name = chapter.xpath(".//a/text()")[0]
            # urljoin replaces the old raw concatenation (response.url + href):
            # identical result for a relative href under a "/"-terminated page
            # URL, and additionally correct for root-relative ("/...") hrefs,
            # which concatenation silently mangled.
            chapter_url = response.urljoin(chapter.xpath(".//a/@href")[0])
            yield {
                'category_name': category_name,
                'cartoon_name': cartoon_name,
                'chapter_name': chapter_name,
                'chapter_url': chapter_url,
            }

