import scrapy
from urllib import request
from w3lib.html import remove_tags
from script_test.items import CnblogsItem

class CnblosSub(scrapy.Spider):
    """Crawl cnblogs.com category listings and collect article details.

    Flow: start page -> POST for sub-category links -> each category's
    listing pages -> per-article summary fields -> article body on the
    detail page. Items are completed in ``parse_info`` and yielded there.
    """

    name = "sub"
    # Empty list places no domain restriction on outgoing requests.
    allowed_domains = []
    start_urls = ["https://www.cnblogs.com/aggsite/SubCategories"]

    def parse(self, response):
        """Re-request the sub-category endpoint as a POST with a JSON body."""
        url = "https://www.cnblogs.com/aggsite/SubCategories"
        body = '{"cateIds":"108698,2,108701,108703,108704,108705,108709,108712,108724,4"}'
        # NOTE(review): the body is JSON, so declare it explicitly —
        # presumably the endpoint rejects the default form encoding; verify.
        yield scrapy.Request(
            url,
            callback=self.parse_list,
            method='POST',
            body=body,
            headers={'Content-Type': 'application/json'},
        )

    def parse_list(self, response):
        """Follow every sub-category link found in the POST response."""
        for href in response.css('a::attr(href)').extract():
            # Response.urljoin resolves relative links against the page URL;
            # it is the supported Scrapy idiom (urllib.request.urljoin only
            # worked via an undocumented re-export from urllib.parse).
            yield scrapy.Request(response.urljoin(href), callback=self.parse_page)

    def parse_page(self, response):
        """Enumerate all listing pages of one category, last page first."""
        last = response.css('a.last::text').extract()
        if not last:
            # Single-page category: there is no "last page" link to read.
            return
        for page in range(int(last[0]), 0, -1):
            list_page = response.url + str(page)
            self.logger.debug(list_page)
            yield scrapy.Request(list_page, callback=self.parse_infolist)

    def parse_infolist(self, response):
        """Scrape the article summaries on one listing page."""
        for post in response.css('div.post_item'):
            # One fresh item per article. The original reused a single
            # CnblogsItem for the whole loop, so every pending request's
            # meta pointed at the same object and later iterations
            # overwrote earlier articles' fields.
            item = CnblogsItem()

            info_url = post.css('a.titlelnk::attr(href)').extract()[0]
            item["info_url"] = info_url
            item["title"] = post.css('a.titlelnk::text').extract()[0]
            item["author"] = post.css('a.lightblue::text').extract()[0]

            # str.strip("发布于 ") treats its argument as a character SET and
            # may eat matching characters from either end; remove the literal
            # prefix instead.
            posted = post.css('div.post_item_foot::text').extract()[1].strip()
            if posted.startswith("发布于"):
                posted = posted[len("发布于"):].strip()
            item["time"] = posted

            item["comment"] = post.css('span.article_comment a::text').extract()[0].strip()
            # Same character-set pitfall with strip('阅读'); drop the label
            # substring explicitly.
            view = post.css('span.article_view a::text').extract()[0]
            item["view"] = view.replace("阅读", "").strip()
            item["diggnum"] = post.xpath('.//span[@class="diggnum"]//text()').extract()[0]

            # Fetch the article body; the item travels via request meta.
            yield scrapy.Request(info_url, callback=self.parse_info, meta={"item": item})

    def parse_info(self, response):
        """Attach the full article HTML to the item and emit it."""
        item = response.meta["item"]
        self.logger.debug(response.url)
        article = response.css('div#cnblogs_post_body').extract()[0]
        # Keep only structural tags; strip inline formatting markup.
        article = remove_tags(article, keep=("div", "p"))
        item["article"] = article
        yield item

