import scrapy

# from parsel import Selector
from cnblogSpider.items import CnblogSpiderItem


class CnblogsSpider(scrapy.Spider):
    """Crawl the blog-post listing pages of cnblogs.com/qiyeboy and yield
    one ``CnblogSpiderItem`` per post (date, url, title, summary)."""

    name = "cnblogs"
    # BUG FIX: was [("enblogs.com")] — a typo plus redundant parentheses.
    # With the wrong domain, Scrapy's OffsiteMiddleware silently drops every
    # follow-up request to cnblogs.com, so pagination stops after page 1.
    allowed_domains = ["cnblogs.com"]
    start_urls = [
        "http://www.cnblogs.com/qiyeboy/default.html?page=1"
    ]

    def parse(self, response):
        """Parse one listing page: yield an item per post, then follow the
        next-page link if present."""
        for div in response.xpath(".//*[@class='day']"):
            # Publication date. extract_first(default="") instead of
            # extract()[0]: a malformed entry no longer raises IndexError.
            time = div.xpath(".//*[@class='dayTitle']/a/text()").extract_first(default="").strip("\n")
            # Link to the full post.
            url = div.xpath(".//*[@class='postTitle']/a/@href").extract_first(default="").strip("\n")
            # Post title, with surrounding whitespace removed.
            title = div.xpath(".//*[@class='postTitle']/a/span/text()").extract_first(default="").strip()
            # Post summary / abstract (first text node of the description div).
            content = div.xpath(".//*[@class='c_b_p_desc']//text()").extract_first(default="").strip("\n")
            yield CnblogSpiderItem(url=url, title=title, time=time, content=content)
        # extract(), NOT extract()[0]: on the last page there is no
        # next-page anchor, the result is an empty list, and indexing [0]
        # would raise IndexError.
        next_page = response.xpath('//a[text()="下一页"]/@href').extract()
        if next_page:
            # dont_filter=True so the pagination request is not rejected by
            # the duplicate filter.
            yield scrapy.Request(url=next_page[0], callback=self.parse, dont_filter=True)

    def parse_body(self, response):
        """Extract image URLs from a full post page and attach them to the
        item carried in ``response.meta``.

        NOTE(review): nothing in this file schedules a Request with
        meta={'item': ...} and callback=self.parse_body — confirm this
        callback is wired up elsewhere, otherwise it is dead code.
        """
        item = response.meta['item']
        body = response.xpath(".//*[@class='postBody']")
        item['image_urls'] = body.xpath('.//img//@src').extract()  # image links inside the post body
        yield item






