# -*- coding: utf-8 -*-
import HTMLParser
import cgi
import datetime
import re

import scrapy

from chinahr.items import JobInfoHLJ
from scrapyluke.processors import *
# Fixed misspelled dunder: __authur__ -> __author__ (standard module metadata name).
__author__ = 'lx'


# 51job spider: crawls job postings for Heilongjiang province.
class Spider51job(scrapy.Spider):
    name = '51job_new'
    allowed_domains = ['51job.com']
    start_urls = ['http://jobs.51job.com/']

    def parse(self, response):
        """Entry point: skip the landing page and request page 1 of the
        Heilongjiang job listing."""
        yield scrapy.Request(
            url='http://jobs.51job.com/heilongjiangsheng/p1',
            callback=self.parse_urls,
        )

    @staticmethod
    def addpage(number):
        """Return the next page number as a string, e.g. '3' -> '4'."""
        return str(int(number) + 1)

    def parse_urls(self, response):
        """Request every job-detail link on one listing page, then follow
        the next listing page.

        The pagination chain terminates on the first page that contains no
        <p class="info"> entries (i.e. one past the last page).
        """
        if not response.xpath('//p[@class="info"]'):
            return

        job_urls = response.xpath(
            '//p[@class="info"]/span[@class="title"]/a/@href').extract()
        for url in job_urls:
            yield scrapy.Request(url, callback=self.parse_job_info)

        # Advance to the next listing page by bumping the trailing /p<N>
        # segment of the current URL.
        curr_page = ''.join(re.findall(r'/p(\d+)', response.url))
        next_url = re.sub(r'/p\d+', '/p%d' % (int(curr_page) + 1), response.url)
        yield scrapy.Request(next_url, callback=self.parse_urls)

    @staticmethod
    def _strip_tabs(text):
        # The site embeds literal tab characters in text nodes; remove them.
        # Returns None unchanged so callers tolerate missing nodes
        # (extract_first() yields None when the XPath matches nothing).
        return text.replace('\t', '') if text else text

    def parse_job_info(self, response):
        """Scrape one job-detail page into a JobInfoHLJ item.

        Fix over the original: every extract_first() result that gets
        post-processed is guarded against None, so a page with an absent
        field no longer aborts the crawl with an AttributeError on
        .replace()/.strip().
        """
        html_parser = HTMLParser.HTMLParser()
        jobinfo = JobInfoHLJ()
        jobinfo['url'] = response.url
        jobinfo['insert_time'] = datetime.datetime.now()
        jobinfo['job_name'] = response.xpath(
            "//div[@class='cn']/h1/text()").extract_first()
        jobinfo['job_com_name'] = self._strip_tabs(response.xpath(
            "//div[@class='cn']/p[@class='cname']/a/text()").extract_first())
        jobinfo['job_salary'] = response.xpath(
            "//div[@class='cn']/strong/text()").extract_first()
        jobinfo['job_req'] = '|'.join(response.xpath(
            "//div[@class='t1']/span[@class='sp4']/text()").extract())
        # The company-kind field arrives HTML-escaped (&nbsp; etc. — see the
        # unescape call); decode entities before trimming whitespace.
        com_kind = self._strip_tabs(response.xpath(
            "//div[@class='cn']/p[@class='msg ltype']/text()").extract_first())
        jobinfo['job_com_kind'] = (
            html_parser.unescape(com_kind).strip() if com_kind else com_kind)
        jobinfo['job_benefits'] = '|'.join(response.xpath(
            "//div[@class='jtag inbox']/p[@class='t2']/span/text()").extract())
        # The three tBorderTop_box sections are, in page order: job
        # description, contact info, and company profile — TODO confirm
        # against the live page layout.
        section = ("//div[@class='tCompany_main']"
                   "/div[@class='tBorderTop_box'][%d]//text()")
        jobinfo['job_info'] = ''.join(
            i.replace('\t', '') for i in response.xpath(section % 1).extract())
        jobinfo['job_contact_info'] = ''.join(
            i.replace('\t', '') for i in response.xpath(section % 2).extract())
        jobinfo['job_com_info'] = ''.join(
            i.replace('\t', '') for i in response.xpath(section % 3).extract())
        return jobinfo