# -*- coding: utf-8 -*-
import datetime
import re

import scrapy

from chinahr.items import JobInfoHLJ
from scrapyluke.processors import *

# Spider for chinahr.com ("China HR"): crawls job and company information.

__author__ = 'lx'


# Heilongjiang province (city code 15)
class ChinahrSpider(scrapy.Spider):
    """Spider for chinahr.com job listings in Heilongjiang (city=15).

    Flow: for every (industry, company-type) facet pair, request the first
    search-results page; each results page yields one request per job-detail
    link plus a request for the next page. Pagination stops naturally when
    a results page contains no job links.
    """

    name = 'chinahr'
    start_urls = ['http://www.chinahr.com/sou/?orderField=relate&city=15&page=1']

    # Site-specific facet codes. '%2C' is a URL-encoded comma, so e.g.
    # '1008%2C1042' selects the combined industry code "1008,1042".
    INDUSTRIES = ['1001', '1002', '1003', '1004', '1005', '1006', '1007',
                  '1009', '1010', '1008%2C1042', '1008%2C1043', '1008%2C1044',
                  '1008%2C1045', '1008%2C1046']
    COMPANY_TYPES = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '99']

    def parse(self, response):
        """Fan out one search request per (industry, company-type) pair."""
        for industry in self.INDUSTRIES:
            for company_type in self.COMPANY_TYPES:
                url = ('http://www.chinahr.com/sou/?orderField=relate'
                       '&companyType=' + company_type +
                       '&industrys=' + industry + '&city=15&page=1')
                yield scrapy.Request(url=url, callback=self.page_jump)

    def page_jump(self, response):
        """Yield one request per job-detail link, then follow the next page."""
        urls = response.xpath("//div[@class='jobList']/ul/li[@class='l1']"
                              "/span[@class='e1']/a/@href").extract()
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse_info)
        if urls:
            # Bump the trailing 'page=N' query parameter by one. A single
            # regex captures both the URL prefix and the page number,
            # replacing the original's three findall() calls and avoiding
            # the None + str crash when 'page=' is absent from the URL.
            match = re.search(r'([\s\S]*?)page=(\d+)', response.url)
            if match:
                head_url = match.group(1)
                next_page = int(match.group(2)) + 1
                yield scrapy.Request(url=head_url + 'page=' + str(next_page),
                                     callback=self.page_jump)

    def parse_info(self, response):
        """Scrape one job-detail page into a JobInfoHLJ item.

        Fields extracted via site-specific XPaths; multi-valued fields are
        joined with '|'. extract_first() may yield None when the page layout
        differs — downstream pipelines must tolerate missing values.
        """
        jobinfo = JobInfoHLJ()
        jobinfo['url'] = response.url
        jobinfo['insert_time'] = datetime.datetime.now()
        jobinfo['job_name'] = response.xpath("//span[@class='job_name']/text()").extract_first()
        jobinfo['job_salary'] = response.xpath("//span[@class='job_price']/text()").extract_first()
        jobinfo['job_location'] = response.xpath("//div[@class='job_require']/span[@class='job_loc']/text()").extract_first()
        # '收起' ("collapse") is a UI widget label caught by the tag XPath.
        jobinfo['job_benefits'] = '|'.join(response.xpath("//div[@class='job_fit_tags']/ul[@class='clear']/li/text()").extract()).replace('收起', '')
        jobinfo['job_update'] = response.xpath("//p[@class='updatetime']/text()").extract_first()
        jobinfo['job_nature'] = response.xpath("//div[@class='job_require']/span[3]/text()").extract_first()
        jobinfo['job_experience'] = response.xpath("//span[@class='job_exp']/text()").extract_first()
        jobinfo['job_req'] = '|'.join(response.xpath("//div[@class='job_intro_tag']/span/text()").extract())
        # Strip spaces and tabs so the free-text description is compact.
        jobinfo['job_info'] = ''.join(response.xpath("//div[@class='job_intro jpadding  mt15']//text()").extract()).strip().replace(' ', '').replace('\t', '')
        jobinfo['job_com_name'] = response.xpath("//div[@class='job-company jrpadding']/h4//text()").extract_first()
        jobinfo['job_com_info'] = response.xpath("//div[@class='company_service']/text()").extract_first()
        jobinfo['com_size'] = '|'.join(response.xpath("//div[@class='compny_tag']/span/text()").extract())
        return jobinfo
