# -*- coding: UTF-8 -*-
import scrapy

from zhipin.items import ZhipinItem
import re
class ZhipinSpiderSpider(scrapy.Spider):
    """Spider for BOSS Zhipin (zhipin.com) job listings.

    Starts from one city/position listing page, yields a ``ZhipinItem`` per
    job card, and follows the "next page" link until pagination ends.
    Items are passed downstream to the configured pipeline (ZhipinPipeline).
    """

    # Spider name (used as `scrapy crawl zhipin_spider`)
    name = 'zhipin_spider'
    # Restrict the crawl to this domain
    allowed_domains = ['www.zhipin.com']
    # Entry URL — city code c101280100 with position category p100901
    # (presumably Shanghai; verify against zhipin.com URL scheme)
    start_urls = ['https://www.zhipin.com/c101280100-p100901/']

    # Shared XPath prefixes for a job card <li>; factored out so each field
    # selector below stays readable. Concatenation reproduces the original
    # full paths byte-for-byte.
    _PRIMARY = ("./div[@class='job-primary']/div[@class='info-primary']"
                "/div[@class='primary-wrapper']/div[@class='primary-box']")
    _COMPANY = ("./div[@class='job-primary']/div[@class='info-primary']"
                "/div[@class='info-company']/div[@class='company-text']")
    _APPEND = "./div[@class='job-primary']/div[@class='info-append clearfix']"

    def parse(self, response):
        """Parse one listing page.

        Yields a ``ZhipinItem`` per job card found on the page, then a
        ``scrapy.Request`` for the next page while a real "next" link exists.
        """
        job_list = response.xpath("//div[@class='job-list']/ul/li")
        # Use the spider's logger instead of print() so output goes through
        # Scrapy's logging configuration.
        self.logger.info("job_list:%d", len(job_list))
        for i_item in job_list:
            zhipin_item = ZhipinItem()
            # Unique job id carried in the anchor's data attribute.
            zhipin_item['_id'] = i_item.xpath(
                self._PRIMARY +
                "/div[@class='job-title']/span[@class='job-name']/a/@data-jid"
            ).extract_first()
            zhipin_item['job_title'] = i_item.xpath(
                self._PRIMARY +
                "/div[@class='job-title']/span[@class='job-name']/a/text()"
            ).extract_first()
            zhipin_item['job_area'] = i_item.xpath(
                self._PRIMARY +
                "/div[@class='job-title']/span[@class='job-area-wrapper']"
                "/span[@class='job-area']/text()"
            ).extract_first()
            zhipin_item['job_limit_salary'] = i_item.xpath(
                self._PRIMARY +
                "/div[@class='job-limit clearfix']/span[@class='red']/text()"
            ).extract_first()
            # Experience / education requirements come back as a list of
            # text fragments, kept as-is.
            zhipin_item['job_limit'] = i_item.xpath(
                self._PRIMARY + "/div[@class='job-limit clearfix']/p/text()"
            ).extract()
            # Detail text fragments are joined into a single string.
            zhipin_item['job_detail'] = ''.join(i_item.xpath(
                self._PRIMARY +
                "/div[@class='info-detail']/div[@class='detail-bottom']"
                "/div[@class='detail-bottom-text']/text()"
            ).extract())
            zhipin_item['info_title'] = i_item.xpath(
                self._COMPANY + "/h3[@class='name']/a/text()"
            ).extract_first()
            zhipin_item['info_company'] = i_item.xpath(
                self._COMPANY + "/p/text()"
            ).extract()
            zhipin_item['info_tags'] = i_item.xpath(
                self._APPEND + "/div[@class='tags']/span/text()"
            ).extract()
            zhipin_item['info_desc'] = i_item.xpath(
                self._APPEND + "/div[@class='info-desc']/text()"
            ).extract_first()
            # Hand the item to the pipeline.
            yield zhipin_item

        # Follow pagination. On the last page the "next" anchor's href is
        # the placeholder 'javascript:;', which must not be requested.
        next_link = response.xpath(
            "//div[@id='main']/div[@class='job-box show-top']"
            "/div[@class='job-list']/div[@class='page']"
            "/a[@class='next']/@href"
        ).extract_first()
        if next_link and next_link != 'javascript:;':
            self.logger.info('next_link:%s', next_link)
            # BUGFIX: the href is site-relative (starts with '/'); naive
            # concatenation with 'https://www.zhipin.com/' produced a
            # double-slash URL. response.urljoin resolves it correctly.
            yield scrapy.Request(response.urljoin(next_link),
                                 callback=self.parse)
