# -*- coding: utf-8 -*-

# Spider for Zhilian Zhaopin (zhaopin.com): crawls job and company info

import scrapy
from scrapy.loader import ItemLoader
from chinahr.items import JobInfoHLJ
from scrapyluke.processors import *

__author__ = 'lx'


# Heilongjiang province
class ZhaopinCrawlSpider(scrapy.Spider):
    """Zhilian Zhaopin (zhaopin.com) spider for Heilongjiang province.

    Generates one search URL per (industry category, experience level,
    city) combination, follows every job-detail link on each result page
    plus the "next page" link, and loads job + company details into a
    ``JobInfoHLJ`` item.
    """

    name = 'zhilianzhaopin'

    def __init__(self, *args, **kwargs):
        # Forward *args/**kwargs: Scrapy passes spider arguments
        # (e.g. ``scrapy crawl -a key=value``) through __init__; the old
        # no-argument signature silently dropped them.
        super(ZhaopinCrawlSpider, self).__init__(*args, **kwargs)

        # Industry-category codes ("hylb") used as the "in" query parameter.
        self.hylb = ['210500', '160400', '160000', '160500', '160200', '300100', '160100', '160600', '180000', '180100',
                     '300500', '300900', '140000', '140100', '140200', '200300', '200302', '201400', '201300', '300300',
                     '120400', '120200', '170500', '170000', '300700',
                     '201100', '120800', '121000', '129900', '121100', '121200', '210600', '120700', '121300', '121500',
                     '300000', '150000', '301100', '121400', '200600', '200800', '200700', '130000', '120500', '130100',
                     '201200', '200100', '120600', '100000', '100100', '990000']
        # Experience-level filters ("el" parameter). Values are already
        # URL-encoded (%3B is ';'); '-1' means "any".
        self.el = ['8', '7%3B5%3B4%3B3%3B1', '-1']
        # City / city-group names ("jl" parameter), already URL-encoded
        # (%2B is '+').
        self.city = ['哈尔滨', '齐齐哈尔%2B鸡西%2B鹤岗%2B双鸭山', '大庆', '伊春%2B佳木斯%2B七台河', '牡丹江%2B黑河%2B绥化%2B大兴安岭%2B安达', '双城%2B尚志%2B绥芬河%2B肇东市']

    def start_requests(self):
        """Yield the first search-result request for every
        (industry, experience level, city) combination."""
        for curr_hylb in self.hylb:
            for curr_el in self.el:
                for curr_city in self.city:
                    url = 'http://sou.zhaopin.com/jobs/searchresult.ashx?in=' + curr_hylb + '&jl=' + curr_city + '&sm=0&p=1&sf=0&st=99999&el=' + curr_el + '&isadv=1'
                    yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Parse a search-result page: follow each job-detail link and,
        if any results were found, the pagination "next page" link."""
        urls = response.xpath('//div[@class="newlist_list_content"]').re(u'(?<=href=")http://jobs.zhaopin.com/.*?(?=")')
        if urls:
            for url in urls:
                yield scrapy.Request(url, callback=self.parse_info)
            next_url = response.xpath("//li[@class='pagesDown-pos']/a[@class='next-page']/@href").extract_first()
            if next_url:
                # urljoin() handles a relative "next page" href (a bare
                # relative URL would make scrapy.Request raise ValueError);
                # absolute URLs pass through unchanged.
                yield scrapy.Request(response.urljoin(next_url), callback=self.parse)

    def parse_info(self, response):
        """Parse a job-detail page into a ``JobInfoHLJ`` item containing
        both the job fields and the employing company's fields."""
        loaderjob = ItemLoader(item=JobInfoHLJ(), response=response)
        loaderjob.add_value('url', value=response.url)
        loaderjob.add_xpath('job_name', '//div[@class="inner-left fl"][1]/h1/text()', TakeFirstL())
        loaderjob.add_xpath('job_com_name', '//div[@class="inner-left fl"][1]/h2/a/text()', TakeFirstL())
        loaderjob.add_xpath('job_benefits', '//div[@class="inner-left fl"][1]/div/span/text()', JoinL('|'))
        # The <li> rows under terminal-ul are label/value pairs; each regex
        # lookbehind keys on the Chinese label text (salary, location, ...).
        loaderjob.add_xpath('job_salary', '//ul[@class="terminal-ul clearfix"]/li', TakeFirstL(), re=u'(?<=职位月薪：</span><strong>).*(?=</strong></li>)')
        loaderjob.add_xpath('job_location', '//ul[@class="terminal-ul clearfix"]/li', RemoveTagsL(), TakeFirstL(), re=u'(?<=工作地点：</span><strong>).*(?=</strong></li>)')
        loaderjob.add_xpath('job_update', '//ul[@class="terminal-ul clearfix"]/li', RemoveTagsL(), TakeFirstL(), re=u'(?<=发布日期：</span><strong>).*(?=</strong></li>)')
        loaderjob.add_xpath('job_nature', '//ul[@class="terminal-ul clearfix"]/li', TakeFirstL(), re=u'(?<=工作性质：</span><strong>).*(?=</strong></li>)')
        loaderjob.add_xpath('job_experience', '//ul[@class="terminal-ul clearfix"]/li', TakeFirstL(), re=u'(?<=工作经验：</span><strong>).*(?=</strong></li>)')
        loaderjob.add_xpath('job_miniEdu', '//ul[@class="terminal-ul clearfix"]/li', TakeFirstL(), re=u'(?<=最低学历：</span><strong>).*(?=</strong></li>)')
        loaderjob.add_xpath('job_recruNums', '//ul[@class="terminal-ul clearfix"]/li', TakeFirstL(), re=u'(?<=招聘人数：</span><strong>).*(?=</strong></li>)')
        loaderjob.add_xpath('job_category', '//ul[@class="terminal-ul clearfix"]/li', RemoveTagsL(), TakeFirstL(), re=u'(?<=职位类别：</span><strong>).*(?=</strong></li>)')
        loaderjob.add_xpath('job_info', '//div[@class="tab-inner-cont"][1]', ExtractTextL(), StripBlankL(), JoinL('|'))

        # NOTE(review): this appends the company-page href into the same
        # 'url' field that already holds response.url — presumably the
        # field's processor collects multiple values; verify against
        # JobInfoHLJ's output processor.
        loaderjob.add_xpath('url', '//div[@class="company-box"]/p[@class="company-name-t"]/a/@href', TakeFirstL())
        loaderjob.add_xpath('com_size', '//div[@class="company-box"]/ul/li', ExtractTextL(), TakeFirstL(),  re=u'(?<=公司规模[:,：]).*')
        loaderjob.add_xpath('com_nature', '//div[@class="company-box"]/ul/li', ExtractTextL(), TakeFirstL(),  re=u'(?<=公司性质[:,：]).*')
        loaderjob.add_xpath('com_industry', '//div[@class="company-box"]/ul/li', ExtractTextL(), TakeFirstL(),  re=u'(?<=公司行业[:,：]).*')
        loaderjob.add_xpath('job_com_info', '//div[@class="tab-inner-cont"][2]', ExtractTextL(), StripBlankL(), JoinL('|'))
        loaderjob.add_xpath('com_link', '//div[@class="company-box"]/ul/li', ExtractTextL(), TakeFirstL(),  re=u'(?<=公司主页[:,：]).*')
        # \\s\\S escaped explicitly: the original non-raw '[\s\S]' relied on
        # Python passing unknown escapes through, which warns on Python 3.
        loaderjob.add_xpath('com_address', '//div[@class="company-box"]/ul/li', RemoveTagsL(), TakeFirstL(),  re=u'(?<=公司地址[:,：])[\\s\\S]*(?=</strong>)')
        return loaderjob.load_item()