# -*- coding: utf-8 -*-

import scrapy
from scrapy.loader import ItemLoader
from chinahr.items import JobInfoItem, ComInfoItem
from scrapyluke.processors import *
import re
import json
import urllib2


def get_js_data():
    """Fetch job5156's shared lookup tables from its bundled JS data file.

    Returns the dict produced by ``extract_js_data``: one entry per
    ``datajson.<name>`` array found in the JS source, each mapped to an
    {id: name} dict.
    """
    js_url = 'http://img.job5156.com/static/js/v1/data/otherData.min.js'
    # Raw strings so the regex metacharacter escapes (\., \[) are not
    # interpreted as (invalid) string escape sequences.
    js_page_regex = r'datajson\.(.*?)=(\[.*?\])'
    # The JS arrays use unquoted keys (id:, name:, ...); these two patterns
    # are used by extract_js_data to quote them into valid JSON.
    js_json_match_regex = r'(id|name|en|value):'
    js_json_sub_regex = r'"\1":'
    return extract_js_data(js_url, js_page_regex, js_json_match_regex, js_json_sub_regex)


def extract_js_data(url, page_regex, json_match_regex, json_sub_regex):
    """Download a JS file and turn its ``datajson.<name> = [...]`` arrays into dicts.

    :param url: URL of the JS data file to download.
    :param page_regex: regex with two groups: (table name, JSON-ish array text).
    :param json_match_regex: regex matching the unquoted JS object keys.
    :param json_sub_regex: replacement that quotes those keys into valid JSON.
    :return: dict of {table_name: {id: name}} for every array entry with an 'id'.
    """
    info = {}
    # Close the HTTP handle even if read() fails (urllib2 responses are not
    # context managers in Python 2).
    resp = urllib2.urlopen(url)
    try:
        page = resp.read()
    finally:
        resp.close()
    for name, raw_array in re.findall(page_regex, page):
        # The JS uses bare keys (id:, name:, ...); quote them so json.loads
        # accepts the array.
        json_str = re.sub(json_match_regex, json_sub_regex, raw_array.decode('utf-8'))
        entries = json.loads(json_str)
        # 'id' in item instead of the removed-in-Py3 item.has_key('id').
        info[name] = dict((item['id'], item['name'])
                          for item in entries if 'id' in item)
    return info


class ZhaopinCrawlSpider(scrapy.Spider):
    """Spider for job5156.com: crawls job postings and company profiles."""

    name = 'job5156'
    allowed_domains = ['job5156.com']
    # NOTE(review): evaluated at import time -- a network failure while
    # fetching the JS lookup tables prevents the module from loading at all.
    # Consider moving this into __init__ or a spider_opened hook.
    data = get_js_data()
    start_urls_from = 'db'

    def parse(self, response):
        """Default parse: read the pager bar and enqueue every list page."""
        page_bar_text = '>'.join(
            response.xpath('//div[@class="pageBar"]//text()').extract()).strip()
        # Extract the last page number from text like "... 12>下一页> ...".
        max_page = re.findall(u'>(\d+)>下一页>', page_bar_text)
        if max_page:
            for page_no in range(1, int(max_page[0]) + 1):
                yield scrapy.Request(
                    '{prefix}&pn={postfix}'.format(prefix=response.url, postfix=page_no),
                    callback=self.parse_list)

    def parse_list(self, response):
        """Parse one search-result page: enqueue job-detail and company pages."""
        job_urls = response.xpath('//div[@class="jobSearchList"]//div[contains(@class,"postItem")]/div[@class="t1"]//@href').extract()
        com_urls = response.xpath('//div[@class="jobSearchList"]//div[contains(@class,"postItem")]/div[@class="t2"]//@href').extract()
        release_dates = response.xpath('//p[@class="other"]/span[last()]/text()').extract()
        for job_url, release_date in zip(job_urls, release_dates):
            # Pass the list-page release date to the detail callback via meta.
            yield scrapy.Request(job_url, meta={'release_date': release_date},
                                 callback=self.parse_job_info)
        for com_url in com_urls:
            yield scrapy.Request(com_url, callback=self.parse_com_info)

    def parse_job_info(self, response):
        """Extract a JobInfoItem from a job-detail page."""
        loaderJob = ItemLoader(item=JobInfoItem(), response=response)
        loaderJob.add_value('url', value=response.url)
        loaderJob.add_value('job_update', value=response.meta['release_date'])
        loaderJob.add_xpath('job_name', u'//div[@class="posTitBar"]/h1/text()', TakeFirstL())
        loaderJob.add_xpath('job_company', u'//div[@class="comInfo"]/div[@class="comTitle"]/h2/a/text()', TakeFirstL())
        # Each attribute lives in the same Attributes <div>; a lookbehind on the
        # Chinese field label isolates the value up to the closing </p>.
        loaderJob.add_xpath('job_experience', u'//div[@id="posMainPanel"]/div[@class="Attributes"]', TakeFirstL(), re=u'(?<=工作经验：</em>).*?(?=</p>)')
        loaderJob.add_xpath('job_benefits', u'//div[@id="posMainPanel"]/div[@class="Attributes"]', TakeFirstL(), re=u'(?<=淘职标签：</em>).*?(?=</p>)')
        loaderJob.add_xpath('job_salary', u'//div[@id="posMainPanel"]/div[@class="Attributes"]', TakeFirstL(), re=u'(?<=职位月薪：</em>).*?(?=</p>)')
        loaderJob.add_xpath('job_location', u'//div[@id="posMainPanel"]/div[@class="Attributes"]', TakeFirstL(), re=u'(?<=工作地点：</em>).*?(?=</p>)')
        loaderJob.add_xpath('job_recruNums', u'//div[@id="posMainPanel"]/div[@class="Attributes"]', TakeFirstL(), re=u'(?<=招聘人数：</em>).*?(?=</p>)')
        loaderJob.add_xpath('job_nature', u'//div[@id="posMainPanel"]/div[@class="Attributes"]', TakeFirstL(), re=u'(?<=工作性质：</em>).*?(?=</p>)')
        loaderJob.add_xpath('job_miniEdu', u'//div[@id="posMainPanel"]/div[@class="Attributes"]', TakeFirstL(), re=u'(?<=学历要求：</em>).*?(?=</p>)')
        loaderJob.add_xpath('job_reqSex', u'//div[@id="posMainPanel"]/div[@class="Attributes"]', TakeFirstL(), re=u'(?<=性别：</em>).*?(?=</p>)')
        loaderJob.add_xpath('job_reqAge', u'//div[@id="posMainPanel"]/div[@class="Attributes"]', TakeFirstL(), re=u'(?<=年龄：</em>).*?(?=</p>)')
        loaderJob.add_xpath('job_reqLan', u'//div[@id="posMainPanel"]/div[@class="Attributes"]', TakeFirstL(), re=u'(?<=语言要求：</em>).*?(?=</p>)')
        loaderJob.add_xpath('job_detail', u'//div[@id="posMainPanel"]/div[@class="Attributes"]', TakeFirstL(), re=u'(?<=招聘期限：</em>).*?(?=</p>)')
        loaderJob.add_xpath('job_desc', u'//div[@id="js_tab"]//pre[@id="posDesc"]/text()', TakeFirstL())

        job_category = ''.join(response.xpath(u'//span[@data-id="industry_label"]/label/text()').extract())
        self.logger.debug('job_category label: %s', job_category)
        # NOTE(review): self.data['industry'] is keyed by the ids extracted
        # from the JS data, while job_category here is the label *text* --
        # confirm which key the mapping actually expects.
        category_name = self.data['industry'].get(job_category)
        if category_name is not None:
            # This is a looked-up value, not an XPath expression, so it must
            # go through add_value (the original add_xpath call was a bug).
            loaderJob.add_value('job_category', category_name)
        else:
            self.logger.debug('no industry mapping for label: %s', job_category)

        return loaderJob.load_item()

    def parse_com_info(self, response):
        """Extract a ComInfoItem from a company-profile page."""
        loaderCom = ItemLoader(item=ComInfoItem(), response=response)
        loaderCom.add_value('url', value=response.url)
        loaderCom.add_xpath('com_name', u'//div[@class="comInfo"]/div[@class="comTitle"]/h1/text()', TakeFirstL())
        loaderCom.add_xpath('com_industry', u'//div[@class="comInfo"]/p/span[contains(@data-id,"industry")]/label/text()', TakeFirstL())
        loaderCom.add_xpath('com_size', u'//div[@class="comInfo"]/p/span[contains(@data-id,"employeeNumber")]/label/text()', TakeFirstL())
        loaderCom.add_xpath('com_nature', u'//div[@class="comInfo"]/p/span[contains(@data-id,"property")]/label/text()', TakeFirstL())
        loaderCom.add_xpath('com_benefits', u'//div[@class="comInfo"]/p[@data-tao]/span/text()', JoinL('|'))
        loaderCom.add_xpath('com_intro', u'//div[@class="comMain"]/div[@class="cont"]/div[@class="text"]/pre/text()', TakeFirstL())
        loaderCom.add_xpath('com_address', u'//div[@class="comMain"]/div[@class="cont conatctBox"]/div[@class="contact"]/p[@id="cAddress"]/label/text()', TakeFirstL())
        # The original registered com_link twice; once is enough.
        loaderCom.add_xpath('com_link', u'//div[@class="comInfo"]//span[@data-id="homePage"]//@href', TakeFirstL())
        return loaderCom.load_item()