# -*- coding: utf-8 -*-
import scrapy
from scrapy.http import Request


class Jobs58Spider(scrapy.Spider):
    """Crawl job listings from 58.com (Shanghai job channel).

    Listing pages 1-70 are scheduled up front via ``start_urls``;
    ``parse`` extracts each posting's link and follows it to
    ``getDetail``.
    """

    name = "jobs58"
    allowed_domains = ["58.com"]
    # Page 1 has no /pnN/ suffix; pages 2-70 do.  Building the list with a
    # comprehension avoids the original class-body ``for`` loop, which
    # leaked the loop variable ``i`` into the class namespace.
    start_urls = ['http://sh.58.com/job/'] + [
        'http://sh.58.com/job/pn%d/' % page for page in range(2, 71)
    ]

    def parse(self, response):
        """Extract per-posting links from a listing page and request each.

        :param response: a listing-index page response.
        :yields: ``Request`` objects for each job-detail page.
        """
        links = response.xpath('//a[@class="t"]/@href').extract()
        for link in links:
            # urljoin makes relative hrefs absolute; the original passed the
            # raw href to Request, which raises "Missing scheme in request
            # url" whenever the page uses relative links.
            yield Request(response.urljoin(link), callback=self.getDetail)

    def getDetail(self, response):
        """Handle a job-detail page response.

        Placeholder: only records the fetched URL.  Uses the spider's
        logger rather than ``print`` so output flows through Scrapy's
        logging configuration.
        """
        self.logger.info('got %s %s', response.url, type(response))
