# -*- coding: utf-8 -*-
import scrapy
from boss_job.items import BossJobItem
import time


class BossSpider(scrapy.Spider):
    """Spider that crawls job listings from zhipin.com (BOSS直聘) for a
    user-supplied keyword and yields one BossJobItem per listing.

    NOTE(review): the keyword is read with input() at class-definition
    (import) time, which blocks any non-interactive run; consider passing
    it via ``scrapy crawl boss -a keyword=...`` instead — left as-is to
    preserve the existing invocation flow.
    """
    name = 'boss'
    allowed_domains = ['zhipin.com']
    key_word = input("请输入需要查询的关键字（python,java,酒店管理,销售,或者职业部分相关关键字）：")
    page = 0  # current results page; incremented in parse() for pagination
    if key_word:
        # City code c101270100 is hard-coded; update it from the live site
        # if you need a different city.
        url = 'https://www.zhipin.com/c101270100/?query=' + key_word
        start_urls = [url]
    else:
        print("不能为空")
        # Keep the attributes defined so self.url/start_urls never raise
        # AttributeError; empty start_urls simply means nothing is crawled.
        url = ''
        start_urls = []

    def parse(self, response):
        """Extract every job card on the results page and follow pagination.

        Yields one BossJobItem per listing, then schedules the next results
        page (up to a 100-page cap).
        """
        job_list = response.xpath('//div[@class="job-list"]//ul//li')  # all job cards
        for job in job_list:
            # Fresh item per row: the original reused one item across the
            # loop, so yielded items aliased each other and optional fields
            # (e.g. 'finance') leaked from earlier rows.
            item = BossJobItem()
            # Run each xpath once and reuse the list instead of re-querying
            # per field (the original executed identical queries 3-5 times).
            primary = job.xpath('.//div[@class="info-primary"]//p/text()').extract()
            company = job.xpath('.//div[@class="info-company"]//p/text()').extract()
            item['company_name'] = job.xpath('.//div[@class="info-company"]//h3/a/text()').extract_first()  # company name
            item['job_name'] = job.xpath('.//div[@class="job-title"]/text()').extract_first()  # job title
            item['job_salary'] = job.xpath('.//div[@class="info-primary"]//span/text()').extract_first()  # salary
            # primary is expected to be [city, experience, education];
            # guard the indexes so a short list no longer raises IndexError.
            item['company_locale'] = primary[0] if primary else None
            item['experience_demand'] = primary[1] if len(primary) > 1 else None
            item['educational_demand'] = primary[2] if len(primary) > 2 else None
            # company is [industry, financing?, headcount] — some companies
            # have no financing entry, so the list may hold only two values.
            item['company_work'] = company[0] if company else None
            if len(company) == 3:
                item['finance'] = company[1]
                item['persons_in_company'] = company[2]
            else:
                item['persons_in_company'] = company[1] if len(company) > 1 else None
            item['crawl_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())  # scrape timestamp
            yield item
        self.page += 1
        print("*" * 120)
        # The original also tested `next_page != "javascript:;"`, comparing
        # an int to a string — always true, so the 100-page cap below is the
        # only real stop condition; the dead check is removed.
        if self.page <= 100:
            next_url = self.url + '&page=' + str(self.page)
            yield scrapy.Request(next_url, callback=self.parse)
        else:
            print('数据抓取结束')

