# -*- coding: utf-8 -*-
import scrapy
from cnblogspider.items import CnblogspiderItem


class BigspiderSpider(scrapy.Spider):
    """Crawl the first 200 list pages of cnblogs.com, yielding one item per post."""

    name = 'bigspider'
    allowed_domains = ['cnblogs.com']
    # BUG FIX: the original URLs ended in a '#p<n>' fragment. URL fragments
    # are never sent to the server, so every request fetched the identical
    # front page (and Scrapy's fingerprinting strips fragments, so the dupe
    # filter silently dropped 199 of the 200 requests). cnblogs serves its
    # paginated listing as real paths under /sitehome/p/<n>.
    start_urls = ['https://www.cnblogs.com/sitehome/p/%d' % i
                  for i in range(1, 201)]

    def parse(self, response):
        """Extract title / author / time / intro for each post on a list page.

        Iterates over one container node per post instead of zipping four
        parallel XPath result lists: the footer div holds several <a>
        elements (author plus comment/view links), so flat lists get
        different lengths and zip() silently pairs authors with the wrong
        posts. All fields use extract_first() so every item value is a
        plain string (the original mixed str and one-element lists).

        :param response: the downloaded list-page response
        :yields: CnblogspiderItem with 'title', 'author', 'time', 'intro'
        """
        for post in response.xpath('//div[contains(@class,"post_item_body")]'):
            item = CnblogspiderItem()
            item['title'] = post.xpath('./h3/a/text()').extract_first()
            foot = post.xpath('.//div[contains(@class,"post_item_foot")]')
            # First anchor in the footer is the author link.
            item['author'] = foot.xpath('./a/text()').extract_first()
            # string(.) flattens the footer's mixed text nodes (includes
            # the publication date) into one string, as the original did.
            item['time'] = foot.xpath('string(.)').extract_first()
            item['intro'] = post.xpath(
                './/p[contains(@class,"post_item_summary")]'
            ).xpath('string(.)').extract_first()
            yield item

