# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import DabossItem


class ZhipinSpider(CrawlSpider):
    """Crawl python job listings on zhipin.com (BOSS Zhipin) and scrape job details.

    Follows listing pagination links, then hands each job-detail page to
    :meth:`parse_job`, which yields one ``DabossItem`` per posting.
    """

    name = 'zhipin'
    allowed_domains = ['zhipin.com']
    start_urls = ['https://www.zhipin.com/c100010000/?query=python&page=1']

    rules = (
        # Rule matching the paginated job-listing pages (follow only, no callback).
        Rule(LinkExtractor(allow=r'.+\?query=python&page=\d'), follow=True),
        # Rule matching job-detail pages; each is parsed by parse_job.
        Rule(LinkExtractor(allow=r'.+job_detail/[a-zA-Z0-9-_]+~\.html'), callback="parse_job", follow=False),
    )

    def parse_job(self, response):
        """Extract one job posting from a detail page and yield a DabossItem.

        Args:
            response: the Scrapy Response for a job-detail page.

        Yields:
            DabossItem with name, salary, city, work_year, education,
            position_info and company fields.

        Missing page elements produce empty-string fields instead of
        raising (``.get()`` would return None and ``.strip()`` would then
        raise AttributeError; ``get(default='')`` avoids that).
        """
        name = response.xpath("//div[@class='name']/h1/text()").get(default='').strip()
        salary = response.xpath("//span[@class='salary']/text()").get(default='').strip()
        job_info = response.xpath(
            "//div[contains(@class,'job-primary')]/div[@class='info-primary']/p//text()"
        ).getall()
        # The info <p> normally holds [city, work_year, education]; pad so a
        # layout change cannot raise IndexError.
        city, work_year, education = (job_info + ['', '', ''])[:3]
        position_info = "".join(
            response.xpath("//div[@class='job-sec']/div[1][@class='text']/text()").getall()
        ).strip()
        company = response.xpath("//div[@class='company-info']//a[2]/text()").get(default='').strip()
        yield DabossItem(
            name=name,
            salary=salary,
            city=city,
            work_year=work_year,
            education=education,
            position_info=position_info,
            company=company,
        )
