# -*- coding: utf-8 -*-
import scrapy
from ..items import *

class CnblogsSpider(scrapy.Spider):
    """Spider that scrapes article summaries from the cnblogs.com front-page list.

    Yields one CnBlogsListItem per article, then follows an incrementing
    '#pN' fragment up to page 100.
    """

    name = 'cnblogs'
    allowed_domains = ['www.cnblogs.com']
    # NOTE(review): '#p1' is a URL *fragment* -- fragments are stripped before
    # the HTTP request is sent, so every "page" below likely fetches the same
    # HTML from the server. Confirm cnblogs' real pagination endpoint and use
    # it here instead of the fragment.
    start_urls = ['https://www.cnblogs.com/#p1']

    def parse(self, response):
        """Parse one listing page.

        Yields:
            CnBlogsListItem: one item per ``<article>`` under ``#post_list``,
                with the source URL, 0-based position on the page, and the
                title/content/author/time/digg/comments/read fields (any of
                which may be None when the XPath finds no match).
            scrapy.Request: a request for the next '#pN' page while N < 100.
        """
        print(response.request.url)
        articles = response.xpath('//div[@id="post_list"]/article')
        for position, article in enumerate(articles):
            item = CnBlogsListItem()
            item['curl'] = response.request.url
            item['num'] = position
            item['title'] = article.xpath('./section/div/a/text()').extract_first()
            item['content'] = article.xpath('./section/div/p/text()').extract_first()
            item['author'] = article.xpath(
                './section/footer/a[@class="post-item-author"]/span/text()').extract_first()
            item['time'] = article.xpath('./section/footer/span/span/text()').extract_first()
            item['digg'] = article.xpath(
                './section/footer/a[@class="post-meta-item btn"][1]/span/text()').extract_first()
            item['comments'] = article.xpath(
                './section/footer/a[@class="post-meta-item btn"][2]/span/text()').extract_first()
            item['read'] = article.xpath('./section/footer/a[4]/span/text()').extract_first()

            yield item

        # The page number is everything after '#p' in the current URL.
        url = response.request.url
        page = int(url[url.index('#p') + 2:])
        if page < 100:
            next_url = url[:url.index('#p') + 2] + str(page + 1)
            # dont_filter: URLs differing only in the fragment would otherwise
            # be collapsed by the duplicate filter, stopping the crawl.
            yield scrapy.Request(next_url, callback=self.parse, dont_filter=True)
