from bs4 import BeautifulSoup
from ..items import ItblogItem
import scrapy


class CnblogSpider(scrapy.Spider):
    """Crawl the first 10 list pages of cnblogs.com and follow each post
    to its detail page to collect the full article body.

    Yields:
        ItblogItem with fields: detail_url, title, description, author,
        publish_time, content.
    """

    name = "cnblog"
    allowed_domains = ["www.cnblogs.com"]
    # Start at page 1.  (Was p/100, which contradicted the page counter
    # below starting at 1 and the <= 10 cap — the spider would fetch page
    # 100 first and then re-crawl pages 2-10.)
    start_urls = ["https://www.cnblogs.com/sitehome/p/1"]

    # Page counter driving pagination; incremented once per list page.
    # NOTE(review): shared spider state — fine for a single sequential
    # crawl, but would misbehave if parse() callbacks ever interleave.
    current_page = 1

    def parse(self, response):
        """Parse one list page.

        Yields a detail-page Request per post (item carried via meta),
        then schedules the next list page until page 10.
        """
        bs = BeautifulSoup(response.text, "lxml")
        sections = bs.select("#post_list article.post-item section.post-item-body")
        for section in sections:
            item = ItblogItem()
            # Title and link to the post's detail page.
            a = section.select_one("div.post-item-text a.post-item-title")
            if a is None:
                # Malformed entry (ad/placeholder) — skip rather than crash.
                continue
            item["detail_url"] = a["href"]
            item["title"] = a.text
            # Summary, with surrounding whitespace/ellipsis characters removed.
            summary = section.select_one("div.post-item-text p.post-item-summary")
            item["description"] = summary.text.strip("\n\t ...") if summary else ""
            # Author name.
            author = section.select_one("footer.post-item-foot a.post-item-author span")
            item["author"] = author.text if author else ""
            # Publish time.
            publish = section.select_one("footer.post-item-foot span.post-meta-item span")
            item["publish_time"] = publish.text if publish else ""

            yield scrapy.Request(item["detail_url"], meta={"item": item}, callback=self.parse_detail)

        self.current_page += 1  # advance the page counter
        if self.current_page <= 10:
            # Build the next list-page URL and request it (default callback: parse).
            next_page = f"https://www.cnblogs.com/sitehome/p/{self.current_page}"
            yield scrapy.Request(next_page)

    def parse_detail(self, response):
        """Attach the full article body to the item passed via meta, then yield it."""
        item = response.meta["item"]

        bs2 = BeautifulSoup(response.text, "lxml")
        body = bs2.select_one("#cnblogs_post_body")
        # Some posts (deleted/non-standard layouts) lack the body container;
        # store an empty string instead of raising AttributeError on None.
        item["content"] = body.text if body is not None else ""

        yield item


    # Alternative pagination strategy: keep following the "next page" link
    # until the list page no longer has one.
    # def parse(self, response):
    #
    #     bs = BeautifulSoup(response.text, "lxml")
    #     sections = bs.select("#post_list article.post-item section.post-item-body")
    #     for section in sections:
    #         item = ItblogItem()
    #         # Title and detail-page URL
    #         a = section.select_one("div.post-item-text a.post-item-title")
    #         item["detail_url"] = a["href"]
    #         item["title"] = a.text
    #         # Summary
    #         description = section.select_one("div.post-item-text p.post-item-summary").text
    #         # Strip surrounding whitespace/ellipsis
    #         item["description"] = description.strip("\n\t ...")
    #         # Author
    #         item["author"] = section.select_one("footer.post-item-foot a.post-item-author span").text
    #         item["publish_time"] = section.select_one("footer.post-item-foot span.post-meta-item span").text
    #
    #         yield item
    #
    #     # Grab the "next page" anchor
    #     a_node = bs.select_one("#paging_block div.pager a.last+a")
    #
    #     if a_node:
    #         # Join the URL (NOTE(review): missing the https:// scheme —
    #         # prefer response.urljoin(a_node["href"]) if this is revived)
    #         domain = self.allowed_domains[0]
    #         url = domain + a_node["href"]
    #
    #         yield scrapy.Request(url)


