# -*- coding: utf-8 -*-

# Spider for lagou.com: crawls job postings and company information.

__author__ = 'fhy'

import json
import re
import urllib

import scrapy
from scrapy.loader import ItemLoader
from scrapy.spiders import Spider

from chinahr.items import JobInfoItem, ComInfoItem
from scrapyluke.processors import *


class LagouCrawlSpider(Spider):
    """Spider for lagou.com job listings.

    Flow:
      * ``parse`` reads a JSON search-API response, extracts the total page
        count and the search keyword, and schedules one request per page.
      * ``parse_info1`` pulls job/company ids out of each result page and
        schedules the HTML detail pages.
      * ``parse_jobinfo`` / ``parse_cominfo`` scrape the detail pages into
        ``JobInfoItem`` / ``ComInfoItem`` via ItemLoaders.
    """

    name = 'lagou'
    allowed_domains = ['lagou.com']
    # Project-specific flag: start URLs are loaded from the database
    # (consumed elsewhere in the project, not by this class).
    start_urls_from = 'db'

    def parse(self, response):
        """Fan out one request per result page of the JSON search API.

        The search keyword is carried to downstream callbacks via the
        request ``meta`` under the key ``'category'``.
        """
        data = json.loads(response.body_as_unicode())
        total_page = data['content']['totalPageCount']
        # The keyword is URL-encoded in the `kd` query parameter.
        # `[^&]*` (rather than `(.*?)&`) also matches when `kd` is the
        # last parameter in the URL, where the original pattern would
        # return None and crash on .group(1).
        match = re.search('kd=([^&]*)', response.url)
        category = urllib.unquote(match.group(1))
        if total_page != 0:
            for page in range(1, total_page + 1):
                url = '%s&pn=%s' % (response.url, str(page))
                yield scrapy.Request(url, callback=self.parse_info1,
                                     meta={'category': category})

    def parse_info1(self, response):
        """Schedule job and company detail pages for every search result.

        NOTE: the same company URL is emitted once per job posting;
        Scrapy's default duplicate filter collapses the repeats.
        """
        sites = json.loads(response.body_as_unicode())
        for con in sites['content']['result']:
            job_url = 'http://www.lagou.com/jobs/' + str(con['positionId']) + '.html'
            com_url = 'http://www.lagou.com/gongsi/' + str(con['companyId']) + '.html'
            # Propagate meta (carries 'category') only to the job page;
            # the company page does not need it.
            yield scrapy.Request(job_url, callback=self.parse_jobinfo, meta=response.meta)
            yield scrapy.Request(com_url, callback=self.parse_cominfo)

    def parse_jobinfo(self, response):
        """Scrape a job detail page into a ``JobInfoItem``."""
        loader = ItemLoader(item=JobInfoItem(), response=response)
        loader.add_value('job_category', response.meta['category'])
        loader.add_value('url', response.url)
        loader.add_xpath('job_company', '//*[@id="container"]/div[1]/div[2]/dl/dt/a/div/h2/text()', TakeFirstL())
        loader.add_xpath('job_name', '//*[@id="container"]/div[1]/div[1]/dl/dt/h1/@title', TakeFirstL())
        # The "job_request" <dd> holds salary / location / experience /
        # education / nature as consecutive <span>s, in that order.
        loader.add_xpath('job_salary', '//dd[@class="job_request"]/span[1]/text()', TakeFirstL())
        loader.add_xpath('job_location', '//dd[@class="job_request"]/span[2]/text()', TakeFirstL())
        loader.add_xpath('job_experience', '//dd[@class="job_request"]/span[3]/text()', TakeFirstL())
        loader.add_xpath('job_miniEdu', '//dd[@class="job_request"]/span[4]/text()', TakeFirstL())
        loader.add_xpath('job_nature', '//dd[@class="job_request"]/span[5]/text()', TakeFirstL())
        loader.add_xpath('job_desc', '//dd[@class="job_bt"]//p', ReplaceBlank(''), RemoveTagsL(), JoinL(''))
        # Requirements ("职位要求") and responsibilities ("岗位职责") are
        # sliced out of the same description text by regex.
        loader.add_xpath('job_desc_req', '//dd[@class="job_bt"]//p', RemoveTagsL(), JoinL('\n'), re=u'职位要求[:：]*(.+)\n\n')
        loader.add_xpath('job_desc_resp', '//dd[@class="job_bt"]//p', JoinL(''), re=u'岗位职责[:：]*(.+)\n\n')
        loader.add_xpath('job_update', '//dd[@class="job_request"]/div/text()', re=u'发布时间：(.+)')
        # TODO(review): the benefits field matches u'职位要求' ("job
        # requirements"), same label as job_desc_req — looks like a
        # copy-paste; lagou labels benefits u'职位诱惑'. Confirm against a
        # live page before changing the pattern.
        loader.add_xpath('job_benefits', '//dd[@class="job_request"]/text()', JoinL(''), re=u'职位要求[:：](.+)[:：]*')

        return loader.load_item()

    def parse_cominfo(self, response):
        """Scrape a company profile page into a ``ComInfoItem``."""
        loader = ItemLoader(item=ComInfoItem(), response=response)
        loader.add_value('url', response.url)
        loader.add_xpath('com_name', '//div[@class="company_main"]/h1/a/@title', TakeFirstL())
        loader.add_xpath('com_link', '//div[@class="company_main"]/h1/a/@href', TakeFirstL())
        loader.add_xpath('com_size', '//div[@class="item_content"]/ul/li[@class="scale"]/span/text()', TakeFirstL())
        loader.add_xpath('com_industry', '//div[@class="item_content"]/ul/li[@class="industry"]/span/text()', TakeFirstL())
        loader.add_xpath('com_address', '//li[@class="mlist_ul_li mlist_li_open "]/p[@class="mlist_li_desc"]/text()', ReplaceBlank(), TakeFirstL())
        loader.add_xpath('com_intro', '//div[@class="item_content"]//p/text()', JoinL(''))

        return loader.load_item()

