# -*- coding: utf-8 -*-
import re
import datetime

from scrapy_redis.spiders import RedisSpider

import scrapy
from scrapy.http import Request
from urllib import parse
# from  ArticleSpider.items import JobBoleArticleItem,ArticleItemLoader
# from ArticleSpider.util.common import get_md5
from scrapy.loader import ItemLoader
from scrapy.xlib.pydispatch import dispatcher
from scrapy import signals



class JobboleSpider(RedisSpider):
    """Spider for jobbole.com article listings, fed start URLs via Redis.

    The scrapy-redis scheduler pops start URLs from the Redis list named by
    ``redis_key`` instead of using a static ``start_urls`` attribute.
    """
    name = 'Jobbole'
    allowed_domains = ['jobbole.com']
    # start_urls = ['http://www.jobbole.com/']

    # Redis list key the scheduler reads start URLs from.
    redis_key = "jobbole:start_urls"

    def parse(self, response):
        """
        Extract every article URL on the current listing page and schedule
        a detail-page request for each, passing the cover image through meta.

        :param response: listing-page response
        :return: generator of scrapy.http.Request objects
        """
        # All article links on the current listing page.
        post_nodes = response.css("#archive .floated-thumb .post-thumb a")
        for post_node in post_nodes:
            cover_image = post_node.css("img::attr(src)").extract_first()
            post_url = post_node.css("::attr(href)").extract_first()
            # Guard: an anchor without an href would make Request(url=None) raise.
            if not post_url:
                continue
            # Resolve relative links against the page URL so the request is
            # valid even when the extracted href lacks a scheme/domain
            # (this was the stated intent of the original inline comment).
            yield Request(
                url=parse.urljoin(response.url, post_url),
                meta={
                    "cover_image": parse.urljoin(response.url, cover_image)
                    if cover_image else cover_image,
                },
                callback=self.parse_detail,
            )

    def parse_detail(self, response):
        # TODO: extract article fields (title, date, tags, body) from the
        # detail page; currently a stub.
        pass

