from getJobs.items import jobItem
import scrapy


class A51jobsSpider(scrapy.Spider):
    """Spider that crawls 51job search-result pages and extracts job postings.

    Starts from a fixed search-result URL and keeps following the
    "next page" pagination link until no further page exists.
    """

    name = '51jobs'
    allowed_domains = ['51job.com']
    start_urls = ['https://search.51job.com/list/090200,000000,0000,00,9,99,%2520,2,1.html']

    def parse(self, response):
        """Yield one populated ``jobItem`` per result row, then follow pagination.

        :param response: a search-result page response.
        :yields: ``jobItem`` instances for each row in the result list, and
            a ``scrapy.Request`` for the next result page when one exists.
        """
        job_rows = response.xpath('//div[@id="resultList"]//div[@class="el"]')
        for row in job_rows:
            # BUG FIX: create a fresh item per row. The original reused one
            # jobItem instance across the whole loop, so every yielded
            # reference pointed at the same mutable object — asynchronous
            # item pipelines could then see only the last row's data.
            item = jobItem()
            item['name']         = row.xpath('./p/span/a/@title').get()
            item['job_url']      = row.xpath('./p/span/a/@href').get()
            item['company']      = row.xpath('./span[@class="t2"]/a/@title').get()
            item['company_url']  = row.xpath('./span[@class="t2"]/a/@href').get()
            # NOTE: 'work_palce' is the (misspelled) field name declared on
            # getJobs.items.jobItem — kept as-is for compatibility.
            item['work_palce']   = row.xpath('./span[@class="t3"]/text()').get()
            item['pay']          = row.xpath('./span[@class="t4"]/text()').get()
            item['publish_time'] = row.xpath('./span[@class="t5"]/text()').get()
            yield item

        next_url = response.xpath("//div[@class='p_in']/ul/li[@class='bk'][2]/a/@href").get()
        if next_url:
            # urljoin resolves the href against the current page, so both
            # absolute and relative pagination links work correctly.
            yield scrapy.Request(
                response.urljoin(next_url),
                callback=self.parse,
            )
