# -*- coding: utf-8 -*-
from ZhipinSpider.items import *


class SpiTestSpider(scrapy.Spider):
    """Spider that scrapes Java job listings from zhipin.com (BOSS直聘).

    Yields one ZhipinspiderItem per job card found on each start URL,
    populated with the job title, salary, address, detail URL, company
    info and recruiter/publish info.
    """
    name = 'spi_test'
    allowed_domains = ['www.zhipin.com']
    start_urls = ["https://www.zhipin.com/job_detail/?query=java&city=100010000&industry=&position="]

    def parse(self, response):
        """Parse a listing page.

        :param response: the downloader's response for each URL in start_urls
        :yields: ZhipinspiderItem, one per job card on the page
        """
        # Each <div class="job-primary"> is one job card.
        for job_primary in response.xpath('//div[@class="job-primary"]'):
            item = ZhipinspiderItem()
            # The ./div[@class="info-primary"] child holds the job details.
            # FIX: was 'info_primary' (underscore) — a class name that does
            # not exist on the page, so title/salary/work_addr/url were
            # always None.
            info_primary = job_primary.xpath('./div[@class="info-primary"]')
            item['title'] = info_primary.xpath('./h3/a/div[@class="job-title"]/text()').extract_first()
            item['salary'] = info_primary.xpath('./h3/a/span[@class="red"]/text()').extract_first()
            item['work_addr'] = info_primary.xpath('./p/text()').extract_first()
            item['url'] = info_primary.xpath('./h3/a/@href').extract_first()
            # The ./div[@class="info-primary"]/div[@class="company-text"]
            # child holds the company details.
            company_text = job_primary.xpath('./div[@class="info-primary"]/div[@class="company-text"]')
            item['company'] = company_text.xpath('./h3/a/text()').extract_first()
            # The company <p> yields up to three text nodes (separated by
            # <em> markers): industry, financing stage, company size —
            # presumably; verify against the live page markup.
            company_info = company_text.xpath('./p/text()').extract()
            if company_info:
                item['industry'] = company_info[0]
            # FIX: guard was `len(company_info) > 1` while indexing [2],
            # which raised IndexError when exactly two nodes were present.
            if len(company_info) > 2:
                item['company_size'] = company_info[2]
            # The ./div[@class="info-publis"] child holds the recruiter
            # name and the publish date.
            info_publis = job_primary.xpath('./div[@class="info-publis"]')
            item['recruiter'] = info_publis.xpath('./h3/text()').extract_first()
            item['publish_date'] = info_publis.xpath('./p/text()').extract_first()
            yield item