# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ..items import IpProxyItem


class ZhipinSpider(CrawlSpider):
    """Crawl python-keyword job listings on zhipin.com (BOSS直聘).

    Follows pagination links for the python search results and parses each
    job-detail page into an ``IpProxyItem``.
    """

    name = 'zhipin'
    allowed_domains = ['www.zhipin.com']
    start_urls = [
        'https://www.zhipin.com/c101270100/?query=python&page=1&ka=page-1'
    ]

    rules = (
        # Pagination pages: follow them to discover more listings, no callback.
        Rule(LinkExtractor(allow=r'.+\?query=python&page=\d'), follow=True),
        # Job-detail pages: extract fields, do not follow further links.
        Rule(LinkExtractor(allow=r'job_detail/.+\.html'),
             callback='parse_detail', follow=False)
    )

    def parse_detail(self, response):
        """Extract title, salary, description and company from a detail page.

        :param response: the job-detail page response.
        :return: an ``IpProxyItem`` with name, salary, jobInfo and company.
        """
        title = response.xpath('//div[@class="name"]/h1/text()').get()
        salary = response.xpath('//div[@class="name"]/span/text()').get()
        raw_info = response.xpath('//*[@id="main"]/div[3]/div/div[2]/div[2]/div[1]/div/text()').getall()
        lines = [text.strip() for text in raw_info]
        # The description historically sits at index 4 of the extracted text
        # nodes; guard against layout changes so we don't raise IndexError.
        job_info = lines[4] if len(lines) > 4 else ''
        # BUG FIX: str.replace returns a new string — the original discarded
        # the result, so the '1.' prefix was never actually stripped.
        job_info = job_info.replace('1.', '')
        company = response.xpath('//div[@class="job-sec"]/div[@class="name"]/text()').get()
        item = IpProxyItem(name=title, salary=salary,
                           jobInfo=job_info, company=company)
        return item
