import scrapy

from SpiderScrapy.items import CnBlogItem
from scrapy import Request


class CnblogsSpider(scrapy.Spider):
    """Crawl cnblogs.com listing pages: extract article summaries, follow
    each article to its detail page, and paginate through the listing."""

    name = "cnblogs"
    allowed_domains = ["www.cnblogs.com"]
    start_urls = ["https://www.cnblogs.com"]

    def parse(self, response):
        """Parse one listing page.

        Yields a detail-page ``Request`` per article (with a partially
        filled item carried in ``meta``), then a ``Request`` for the next
        listing page if one exists.
        """
        # [1] All article nodes on the listing page.
        article_list_all = response.xpath('//*[@id="post_list"]/article')
        # [2] Walk each article and extract its summary fields.
        for article_obj in article_list_all:
            # The item must be created inside the loop so every article
            # gets its own independent instance.
            item = CnBlogItem()
            # (3) Article title.
            title = article_obj.xpath('./section/div/a/text()').extract_first()
            # (4) Detail URL — without it there is nothing to follow, so
            # skip the article instead of crashing later.
            detail_url = article_obj.xpath('./section/div/a/@href').extract_first()
            if not detail_url:
                self.logger.warning("skipping article with no detail url: %s", title)
                continue
            # (5) Summary text: the second text node holds the real
            # description and may be absent, so index defensively instead
            # of catching a broad Exception.
            desc_parts = article_obj.xpath('./section/div/p/text()').extract()
            desc = desc_parts[1].strip() if len(desc_parts) > 1 else ""
            # (6) Author name.
            author_name = article_obj.xpath('./section/footer/a[1]/span/text()').extract_first()
            # (7) Author homepage URL.
            author_blog = article_obj.xpath('./section/footer/a[1]/@href').extract_first()
            # (8) Comment count.
            comment_num = article_obj.xpath('./section/footer/a[2]/span/text()').extract_first()
            # (9) Upvote count.
            up_num = article_obj.xpath('./section/footer/a[3]/span/text()').extract_first()
            # (10) View count.
            visit_num = article_obj.xpath('./section/footer/a[4]/span/text()').extract_first()

            # (11) Stash everything parsed so far on the item; a Scrapy
            # Item supports dict-style update.
            item.update({
                "title": title,
                "detail_url": detail_url,
                "desc": desc,
                "author_name": author_name,
                "author_blog": author_blog,
                "comment_num": comment_num,
                "up_num": up_num,
                "visit_num": visit_num,
            })
            # Follow the detail page, passing the item along via meta;
            # the item is finally yielded from parse_content.
            yield Request(url=detail_url, callback=self.parse_content, meta={
                "item": item,
            })

        # Pagination: the last pager link points at the next page. On the
        # final page the XPath yields None — the original concatenated it
        # onto a string and raised TypeError — so guard before following.
        next_href = response.xpath(
            '//*[@id="paging_block"]/div/a[last()]/@href').extract_first()
        if next_href:
            # urljoin resolves relative hrefs against the current URL.
            yield Request(url=response.urljoin(next_href), callback=self.parse)

    def parse_content(self, response):
        """Parse an article detail page and yield the completed item."""
        item = response.meta.get("item")
        # Full article body HTML. Fall back to "" when the node is missing;
        # the original wrapped extract_first() in str(), which turned a
        # missing body into the literal string "None".
        content = response.xpath("//div[@id='cnblogs_post_body']").extract_first() or ""
        item.update({
            "content": content,
        })
        yield item
