# -*- coding: utf-8 -*-

# Spider for yjbys.com (graduate-jobs site): crawls job postings and company info.

__author__ = 'yanyan'

from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from chinahr.items import JobInfoItem, ComInfoItem
from scrapy.loader import ItemLoader
from scrapyluke.processors import *

class YingJieSpider(CrawlSpider):
    """Crawl spider for yjbys.com that scrapes job postings and company
    profiles into JobInfoItem / ComInfoItem.

    The site serves several page templates, so most item fields are
    probed with multiple XPaths; the ItemLoader keeps whatever matches.
    """

    name = 'yingjie'
    allowed_domains = ['yjbys.com']
    start_urls = ['http://www.yjbys.com/',
                  'http://www.yjbys.com/company/',
                  'http://www.yjbys.com/mingqi/',
                  'http://www.yjbys.com/jobs/',
                  'http://www.yjbys.com/sydwzp/',
                  'http://www.yjbys.com/gaoxiao/',
                  'http://www.yjbys.com/shixi/',
                  'http://www.yjbys.com/tuijian/',
                  'http://www.yjbys.com/xiaoyuanzhaopin/',
                  ]

    # Pagination links.
    pages_lx = LinkExtractor(restrict_xpaths='//div[@class="pages"]')
    # Job-category listing links (restricted to /zhaopin/ URLs).
    job_category_lx = LinkExtractor(restrict_xpaths='//ul[@class="sl-2"]', allow=("/zhaopin/",))
    # Job-detail links; the three variants match the site's differing
    # span class names (note the intentional trailing space in the first).
    jobs1_lx = LinkExtractor(restrict_xpaths='//span[@class="zhaopin-name "]', deny=("/company/",))
    jobs2_lx = LinkExtractor(restrict_xpaths='//span[@class="zhaopin-name"]', deny=("/company/",))
    jobs3_lx = LinkExtractor(restrict_xpaths='//span[@class="zhaopin-name fred fblod"]', deny=("/company/",))
    # Company-profile links live in the same span but under /company/.
    com_lx = LinkExtractor(restrict_xpaths='//span[@class="zhaopin-name "]', allow=("/company/",))

    rules = (
        Rule(job_category_lx),
        Rule(pages_lx),
        Rule(jobs1_lx, callback='parse_jobinfo', follow=True),
        Rule(jobs2_lx, callback='parse_jobinfo', follow=True),
        Rule(jobs3_lx, callback='parse_jobinfo', follow=True),
        Rule(com_lx, callback='parse_cominfo', follow=True),
    )

    def parse_jobinfo(self, response):
        """Build a JobInfoItem from a job-detail page.

        Each spec is (field, xpath, processors, extra kwargs); specs are
        applied in order so values accumulate exactly as the templates
        provide them. Processors are instantiated per call.
        """
        field_specs = [
            ('job_company', '//div[@class="company_info"]/span/text()', (), {}),
            ('job_name', '//div[@id="job_detail"]/h2/@title', (), {}),
            ('job_name', '//div[@class="title"]/h1/text()', (), {}),
            ('job_name', '//div[@class="posZhiwei"]/h1/text()', (), {}),
            ('job_update', '//div[@class="job_info"]//tr[1]/td[@class="value"][1]/text()', (), {}),
            ('job_update', '//div[@class="title"]/text()', (), {'re': u'发布时间：(.*)'}),
            ('job_update', '//div[@class="pub-time"]/span[@class="date"][1]/text()', (), {'re': u'日期:(.*)'}),
            ('job_location', '//div[@class="job_info"]//tr[1]/td[@class="value"][2]/text()', (), {}),
            ('job_nature', '//div[@class="job_info"]//tr[3]/td[@class="value"][1]/text()', (), {}),
            ('job_miniEdu', '//div[@class="job_info"]//tr[3]/td[@class="value"][2]/text()', (), {}),
            ('job_recruNums', '//div[@class="job_info"]//tr[5]/td[@class="value"][1]/text()', (), {}),
            ('job_salary', '//div[@class="job_info"]//tr[5]/td[@class="value"][2]/text()', (), {}),
            ('job_detail', '//div[@class="job_request"]//p/text()', (ReplaceBlank(), RemoveTagsL(), JoinL('\n')), {}),
            ('job_detail', '//div[@class="posDesc"]//p/text()', (JoinL('\n'),), {}),
            ('job_detail', '//div[@class="job_request"]//span/text()', (JoinL('\n'),), {}),
            # Alternate "cont_div" template variant.
            ('job_name', '//div[@class="cont_div"]/ul/li[1]/span[@class="value"]/text()', (), {}),
            ('job_location', '//div[@class="cont_div"]/ul/li[2]/span[@class="value"]/text()', (), {}),
            ('job_update', '//div[@class="cont_div"]/ul/li[2]/span[@class="value"]/text()', (), {}),
            ('job_detail', '//div[@class="content"]//p/text()', (ReplaceBlank(), RemoveTagsL(), JoinL('\n')), {}),
        ]
        loader = ItemLoader(item=JobInfoItem(), response=response)
        loader.add_value('url', response.url)
        for field, xpath, processors, extra in field_specs:
            loader.add_xpath(field, xpath, *processors, **extra)
        return loader.load_item()

    def parse_cominfo(self, response):
        """Build a ComInfoItem from a company-profile page."""
        field_specs = [
            ('com_name', '//div[@class="company_info"]/span/text()', (TakeFirstL(),), {}),
            ('com_industry', '//div[@class="comDesc"]/ul[@class="info"]/li[2]/text()', (), {'re': u'行业：(.*)'}),
            ('com_nature', '//div[@class="comDesc"]/ul[@class="info"]/li[3]/text()', (), {'re': u'性质：(.*)'}),
            ('com_size', '//div[@class="comDesc"]/ul[@class="info"]/li[4]/text()', (), {'re': u'规模：(.*)'}),
            ('com_intro', '//div[@class="comDesc"]/div[@class="hide-desc"]/p/text()', (RemoveTagsL(), JoinL('\n')), {}),
            ('com_address', '//div[@class="comMap"]//span[2]/text()', (), {'re': u'详细地址：(.*)'}),
        ]
        loader = ItemLoader(item=ComInfoItem(), response=response)
        loader.add_value('url', response.url)
        for field, xpath, processors, extra in field_specs:
            loader.add_xpath(field, xpath, *processors, **extra)
        return loader.load_item()