# -*- coding: utf-8 -*-
import re

import scrapy
from scrapy.loader import ItemLoader

from chinahr.items import JobInfoItem, ComInfoItem
from scrapyluke.processors import *

__author__ = 'yanyan'


class DaJieSpider(scrapy.Spider):
    """Spider for dajie.com job listings.

    Crawls search-result pages (``parse``), fans out to per-job and
    per-company detail pages, and loads them into ``JobInfoItem`` /
    ``ComInfoItem`` via processors from ``scrapyluke.processors``
    (``ReplaceBlank``, ``JoinL``, ``RemoveTagsL``).
    """

    name = 'dajie'
    allowed_domains = ['dajie.com']
    # NOTE(review): non-standard attribute — presumably the project framework
    # loads start URLs from a database when this is set; confirm in middleware.
    start_urls_from = 'db'

    # Listing URLs are expected to end in "..._<page number>".
    _PAGE_RE = re.compile(r'^(.*_)(\d+)$')

    def parse(self, response):
        """Parse a search-result listing page.

        If the page has job entries, schedule it for detail-link extraction
        and schedule the next listing page; if it is empty and has no
        pagination bar, retry it once more.
        """
        pages = response.xpath('//div[@class="paging"]')
        contents = response.xpath('//ul[@class="job-suggest-list"]/li')
        if contents:
            # Same URL, different callback: must bypass the dupefilter or
            # Scrapy silently drops this request and parse_urls never runs.
            yield scrapy.Request(response.url, callback=self.parse_urls,
                                 dont_filter=True)
            match = self._PAGE_RE.search(response.url)
            # Guard: the original indexed findall()[0] and crashed with
            # IndexError on URLs without a trailing "_<digits>" page suffix.
            if match:
                next_page = int(match.group(2)) + 1
                yield scrapy.Request('%s%d' % (match.group(1), next_page),
                                     callback=self.parse)
        elif not pages:
            # Empty page with no pagination bar: likely a transient/blocked
            # response — re-fetch it (dont_filter so the retry is not dropped).
            yield scrapy.Request(response.url, callback=self.parse,
                                 dont_filter=True)

    def parse_urls(self, response):
        """Extract job-detail and company-detail links from a listing page."""
        job_urls = response.xpath('//div[@class="search-list-con"]/div[@class="jst-title-wrap"]/h3/a/@href').extract()
        com_urls = response.xpath('//div[@class="search-list-con"]/a[@class="search-company"]/@href').extract()
        for url in job_urls:
            yield scrapy.Request(url, callback=self.parse_job_info)
        for url in com_urls:
            yield scrapy.Request(url, callback=self.parse_com_info)

    def parse_job_info(self, response):
        """Scrape one job-detail page into a JobInfoItem."""
        loaderJob = ItemLoader(item=JobInfoItem(), response=response)
        loaderJob.add_value('url', value=response.url)
        loaderJob.add_xpath('job_name', u'//div[@class="p-corp-detail-title"]/h1/text()', ReplaceBlank(''))
        # Strip the "posted on" prefix, keeping only the date portion.
        loaderJob.add_xpath('job_update', u'//p[@class="detail"]/span[@class="floatright"]/text()', re=u'发布于(.*)')
        loaderJob.add_xpath('job_company', u'//div[@class="i-corp-base-info"]/p[@class="title"]/a/text()', ReplaceBlank(''))
        # Salary comes either from the highlighted tag or, failing that, the
        # "negotiable" tag under the job-highlights <dl>.
        loaderJob.add_xpath('job_salary', u'//span[@class="tag import"]/text()')
        loaderJob.add_xpath('job_salary', u'//div[@class="p-corp-detail-tag"]/dl/dt[text()="职位亮点"]/../dd/span[@class="tag"][text()="薪资面议"]/text()', ReplaceBlank(''), JoinL('|'))
        # All highlight tags except the salary-negotiable one are benefits.
        loaderJob.add_xpath('job_benefits', u'//div[@class="p-corp-detail-tag"]/dl/dt[text()="职位亮点"]/../dd/span[@class="tag"][text()!="薪资面议"]/text()', ReplaceBlank(''), JoinL('|'))
        loaderJob.add_xpath('job_desc_req', u'//div[@class="p-corp-detail-tag"]/dl/dt[text()="职位要求"]/../dd/span[@class="tag"]/text()', ReplaceBlank(''), JoinL('|'))
        loaderJob.add_xpath('job_location', u'//div[@class="p-corp-detail-info"]/dl/dt[text()="工作地点"]/../dd/p/text()', ReplaceBlank(''))
        loaderJob.add_xpath('job_recruNums', u'//div[@class="p-corp-detail-info"]/dl/dt[text()="招聘人数"]/../dd/p/text()', ReplaceBlank(''))
        loaderJob.add_xpath('job_desc_resp', u'//div[@class="p-corp-detail-info"]/dl/dt[text()="岗位职责"]/../dd/div[@id="jp_maskit"]//text()', ReplaceBlank(''), JoinL(' '))
        # Everything not already captured above (location/headcount/duties).
        loaderJob.add_xpath('job_detail', u'//div[@class="p-corp-detail-info"]/dl/dt[text()!="工作地点"][text()!="招聘人数"][text()!="岗位职责"]/../dd//p/text()', ReplaceBlank(''), JoinL('|'))
        return loaderJob.load_item()

    def parse_com_info(self, response):
        """Scrape one company-detail page into a ComInfoItem."""
        loaderCom = ItemLoader(item=ComInfoItem(), response=response)
        loaderCom.add_value('url', value=response.url)
        loaderCom.add_xpath('com_name', u'//div[@class="detail-box"]/h3/text()', ReplaceBlank(''))
        # Each field is keyed by its Chinese label in the detail list
        # (industry / nature / size / website / region / address).
        loaderCom.add_xpath('com_industry', u'//div[@class="detail-box"]/ul[@class="detail"]/li[@class="clearfix"]/span[@class="tl"][text()="行业："]/../span[@class="c"]/text()', ReplaceBlank(''))
        loaderCom.add_xpath('com_nature', u'//div[@class="detail-box"]/ul[@class="detail"]/li[@class="clearfix"]/span[@class="tl"][text()="性质："]/../span[@class="c"]/text()', ReplaceBlank(''))
        loaderCom.add_xpath('com_size', u'//div[@class="detail-box"]/ul[@class="detail"]/li[@class="clearfix"]/span[@class="tl"][text()="规模："]/../span[@class="c"]/text()', ReplaceBlank(''))
        loaderCom.add_xpath('com_link', u'//div[@class="detail-box"]/ul[@class="detail"]/li[@class="clearfix"]/span[@class="tl"][text()="网址："]/../span[@class="c"]/text()', ReplaceBlank(''))
        loaderCom.add_xpath('com_detail', u'//div[@class="detail-box"]/ul[@class="detail"]/li[@class="clearfix"]/span[@class="tl"][text()="地区："]/../span[@class="c"]/text()', ReplaceBlank(''))
        loaderCom.add_xpath('com_address', u'//div[@class="detail-box"]/ul[@class="detail"]/li[@class="clearfix"]/span[@class="tl"][text()="地址："]/../span[@class="c"]/text()', ReplaceBlank(''))
        loaderCom.add_xpath('com_intro', u'//div[@class="compny-desc"]/p[@class="desc"]/text()', RemoveTagsL(), ReplaceBlank(''), JoinL('\n'))
        return loaderCom.load_item()






