# -*- coding: utf-8 -*-
import scrapy
import re

from scrapy.http import Request
import urlparse

class JobboleSpider(scrapy.Spider):
    """Crawl article listings on top.jobbole.com and parse each article page.

    ``parse`` walks the listing pages (following pagination), yielding a
    request per article; ``parse_detail`` extracts the article fields using
    both XPath and CSS selectors (the two styles are kept side by side,
    presumably for comparison/learning purposes).
    """
    name = "jobbole"
    # BUGFIX: allowed_domains must hold bare domain names, not full URLs —
    # with "http://www.jobbole.com/" Scrapy's OffsiteMiddleware matches
    # nothing and filters every request (including top.jobbole.com).
    allowed_domains = ["jobbole.com"]
    start_urls = ['http://top.jobbole.com/']

    def parse(self, response):
        """Schedule every article on this listing page, then follow the next page."""
        # Extract all article links and hand each detail page to Scrapy.
        post_urls = response.css(".media .media-body .p-tit a::attr(href)").extract()
        for post_url in post_urls:
            yield Request(url=urlparse.urljoin(response.url, post_url),
                          callback=self.parse_detail)

        # Extract the pagination link and hand the next listing page to Scrapy.
        next_url = response.css(".pagination.p-paging a::attr(href)").extract_first()
        if next_url:
            # BUGFIX: originally joined `post_url` (the last article link)
            # instead of `next_url`, so pagination never advanced.
            yield Request(url=urlparse.urljoin(response.url, next_url),
                          callback=self.parse)

    def parse_detail(self, response):
        """Extract title, vote count, date and body from one article page.

        Each field is pulled twice — once via XPath, once via CSS — and
        ``extract_first("")`` is used so a missing element yields an empty
        string instead of raising IndexError.
        """
        # --- XPath selectors ---
        title = response.xpath("//h1[@class= 'p-tit-single']").extract_first("")

        # The vote counter's id is "<post_id>votetotal"; match the suffix
        # instead of hard-coding one article's id (was '36599votetotal',
        # which only worked on a single post).
        fav_num = response.xpath("//*[contains(@id, 'votetotal')]/text()").extract_first("")

        # Keep only the leading digits of the vote counter text.
        match_re = re.match(r".*?(\d+).*", fav_num)
        if match_re:
            fav_num = match_re.group(1)

        # Renamed from `time`, which shadowed the stdlib module name.
        create_date = response.xpath("//p[@class = 'p-meta']").extract_first("")

        content = response.xpath("//div[@class= 'p-entry']").extract_first("")

        # --- CSS selectors (same fields, alternative syntax) ---
        title1 = response.css(".p-tit-single ").extract_first("")

        fav_num1 = response.css(".vote-post-up ").extract_first("")

        # BUGFIX: originally matched against `fav_num` (copy-paste error);
        # this branch must clean the CSS-extracted `fav_num1`.
        match_re1 = re.match(r".*?(\d+).*", fav_num1)
        if match_re1:
            fav_num1 = match_re1.group(1)

        time1 = response.css(".p-meta ").extract_first("")

        content1 = response.css(".p-entry ").extract_first("").encode('utf-8')

        pass

