import scrapy
import logging
from myspider.items import BlogListItem

logger = logging.getLogger(__name__)
class blogSpider(scrapy.Spider):
    """Crawl the paginated blog index on ehei.top and scrape every post.

    Listing pages yield one request per article; each article page fills
    in the remaining fields of the ``BlogListItem`` started on the listing.
    """

    name = 'blog'
    allowed_domains = ['ehei.top']
    start_urls = ['http://www.ehei.top/blog/page/1/']

    def parse(self, response):
        """Parse a listing page.

        Yields one request per article (carrying a partially-filled item
        via ``meta``) plus, when present, a request for the next listing page.
        """
        for link in response.xpath('//article/h2/a'):
            item = BlogListItem()
            item['title'] = link.xpath('text()').get()
            # urljoin guards against relative hrefs, which would otherwise
            # raise "Missing scheme in request url"; absolute URLs pass through.
            item['url'] = response.urljoin(link.xpath('@href').get())
            yield scrapy.Request(
                item['url'],
                callback=self.parseContent,
                meta={"item": item},
            )

        next_url = response.xpath('//li[@class="next"]/a/@href').get()
        # was a stray print(); lazy %-formatting so the URL is only
        # rendered when debug logging is enabled
        logger.debug('next listing page: %s', next_url)
        if next_url:
            yield scrapy.Request(
                response.urljoin(next_url),
                callback=self.parse,
            )

    def parseContent(self, response):
        """Parse an article page and complete the item passed via ``meta``.

        Adds ``pub_time`` (the <time> datetime attribute) and ``content``
        (the post body's outer HTML), then yields the finished item.
        """
        item = response.meta['item']
        item['pub_time'] = response.xpath("//ul[@class='post-meta']//time/@datetime").get()
        item['content'] = response.xpath("//div[@class='post-content']").get()
        yield item