# -*- coding: utf-8 -*-
import scrapy
from newsblog.items import NewsblogItem


class SpiderblogSpider(scrapy.Spider):
    """Crawl the cnblogs news listing pages (1-100) and yield one item per article."""

    name = 'spiderblog'
    allowed_domains = ['cnblogs.com']
    # Listing pages 1..100 of the news index.
    start_urls = ['https://news.cnblogs.com/n/page/' + str(x) for x in range(1, 101)]

    def parse(self, response):
        """Parse one listing page into NewsblogItem objects.

        Iterates over each article's container <div> and extracts every field
        relative to that node. The previous implementation zipped six
        independent page-wide lists, so any article missing a field (e.g. no
        tag or no comments) shifted the pairing of fields across all later
        articles and truncated the output to the shortest list.
        """
        for entry in response.xpath('//div[contains(@class,"content")]'):
            item = NewsblogItem()
            # extract_first() yields a plain string (or None when the node is
            # absent), matching the per-selector .extract() of the old code.
            item['title'] = entry.xpath('./h2/a/text()').extract_first()
            # string(.) flattens nested markup in the summary; keep the
            # list-of-strings shape the original stored for 'text'.
            item['text'] = entry.xpath(
                './div[contains(@class,"entry_summary")]'
            ).xpath('string(.)').extract()
            # All remaining fields live under the entry's footer row.
            footer = entry.xpath('./div[@class="entry_footer"]')
            item['comment'] = footer.xpath(
                './span[@class="comment"]/a/text()').extract_first()
            item['browse'] = footer.xpath(
                './span[@class="view"]/text()').extract_first()
            item['tag'] = footer.xpath(
                './span[@class="tag"]/a/text()').extract_first()
            item['time'] = footer.xpath(
                './span[@class="gray"]/text()').extract_first()
            yield item
