# -*- coding: utf-8 -*-
import scrapy


class JobSpider(scrapy.Spider):
    """Crawl python job listings from 51job.com search results.

    Starts from a fixed search URL (keyword "python", city code 020000 /
    Shanghai) and follows the "下一页" (next page) link until pagination ends.
    Yields one dict per job row with keys: job, blank, city, date_time.
    """
    name = 'job'
    allowed_domains = ['search.51job.com']
    start_urls = ['https://search.51job.com/list/020000,000000,0000,00,9,99,python,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare=']

    def parse(self, response):
        """Extract job rows from one result page, then follow pagination.

        :param response: the result-page Response to scrape.
        :yields: dicts with job title, company, city and posting date,
                 plus a Request for the next page when one exists.
        """
        div_list = response.xpath('//div[@class="dw_table"]/div[@class="el"]')
        for div in div_list:
            item = {}
            # extract_first() returns None when the node is missing; guard
            # before .strip() so a malformed row doesn't abort the whole page.
            job_title = div.xpath('./p/span/a/text()').extract_first()
            item['job'] = job_title.strip() if job_title else job_title
            # NOTE: 'blank' holds the company name (key kept as-is so
            # downstream pipelines that rely on it keep working).
            item['blank'] = div.xpath('./span[@class="t2"]/a/@title').extract_first()
            item['city'] = div.xpath('./span[2]/text()').extract_first()
            item['date_time'] = div.xpath('./span[4]/text()').extract_first()
            yield item
            # Use the spider logger instead of print() so output respects
            # Scrapy's LOG_LEVEL configuration.
            self.logger.debug('scraped item: %s', item)

        # "下一页" = "next page"; the href may be relative, so let
        # response.follow resolve it against the current page URL
        # (scrapy.Request would reject a relative URL).
        next_url = response.xpath('//a[text()="下一页"]/@href').extract_first()
        if next_url:
            yield response.follow(
                next_url,
                callback=self.parse
            )
