# -*- coding: utf-8 -*-
import scrapy
from boss.items import BossItem

class BoosJobSpider(scrapy.Spider):
    """Spider that scrapes job listings from BOSS Zhipin (zhipin.com).

    Starts from a fixed city/category listing page and follows the
    "next" pagination link until no further page exists.
    """

    name = 'boos_job'
    allowed_domains = ['zhipin.com']
    start_urls = ['https://www.zhipin.com/c100010000-p100109']

    def parse(self, response):
        """Parse one listing page.

        Args:
            response: the listing-page HTTP response.

        Yields:
            BossItem: one item per job card on the page.
            scrapy.Request: a request for the next page, when one exists.
        """
        job_list = response.xpath("//*[@class='job-primary']")
        for job in job_list:
            item = BossItem()
            # Field XPaths are relative to the job card element; each
            # extract_first() may be None if the page layout changes.
            item['name'] = job.xpath("./div[1]/h3/a/div/text()").extract_first()
            item['money'] = job.xpath("./div[1]/h3/a/span/text()").extract_first()
            item['company'] = job.xpath("./div[2]/div/h3/a/text()").extract_first()
            item['hr'] = job.xpath("./div[3]/h3/text()").extract_first()
            item['address'] = job.xpath("./div[1]/p/text()").extract_first()
            item['direction'] = job.xpath("./div[2]/div/p/text()").extract_first()
            item['time'] = job.xpath("./div[3]/p/text()").extract_first()
            yield item

        # Check the raw href BEFORE joining: on the last page
        # extract_first() returns None and urljoin(None) raises TypeError;
        # urljoin('') would return the current URL (truthy) and re-crawl
        # the same page endlessly.
        next_href = response.xpath("//*[@class='next']/@href").extract_first()
        if next_href:
            yield scrapy.Request(
                response.urljoin(next_href),
                callback=self.parse,
            )
