# -*- coding: utf-8 -*-
import datetime
import scrapy
from chinahr.items import JobInfoHLJ
# Liepin.com spider: crawls job postings for Heilongjiang province
__author__ = 'lx'


# Liepin.com spider
class LiepinCrawlSpider(scrapy.Spider):
    """Crawl job postings from Liepin.com.

    The search URL pins region code ``dqs=160`` -- presumably Heilongjiang,
    matching the ``JobInfoHLJ`` item (TODO confirm against Liepin's
    region-code table). Detail pages come in several layouts (company-hosted
    pages under ``job.liepin.com``, regular pages, and "position closed"
    variants of each), which differ only in CSS class names; ``parse_info``
    selects the right set of class names and extracts with shared helpers.
    """

    name = 'liepin'                    # spider name
    allowed_domains = ['liepin.com']   # restrict the crawl to liepin.com

    start_urls = ['http://www.liepin.com/it/?imscid=R000000030']

    # Search-results URL template: every query parameter is fixed except the
    # trailing page number appended in parse().
    SEARCH_URL = (
        'https://www.liepin.com/zhaopin/?pubTime=&salary=&searchType=1'
        '&clean_condition=&jobKind=&isAnalysis=&init=-1&sortFlag=15'
        '&searchField=1&key=&industries=&jobTitles=&dqs=160&compscale='
        '&compkind=&ckid=bbc03314b6ae960d&curPage='
    )

    def parse(self, response):
        """Fan out over the first 68 search-result pages."""
        # 68 appears to be the number of result pages for this region/query
        # at crawl time -- TODO confirm it is still current.
        for page in range(68):
            yield scrapy.Request(url=self.SEARCH_URL + str(page),
                                 callback=self.parse_flip)

    def parse_flip(self, response):
        """Follow every job-detail link found on one search-result page."""
        for url in response.xpath("//div[@class = 'job-info']/h3/a/@href").extract():
            yield scrapy.Request(url=url, callback=self.parse_info)

    # ---- extraction helpers -------------------------------------------

    @staticmethod
    def _squash(response, xpath, strip=True):
        """Join all text nodes under *xpath* and remove ASCII spaces.

        ``strip=False`` keeps leading/trailing whitespace, mirroring the
        original per-field behaviour.
        """
        text = ''.join(response.xpath(xpath).extract()).replace(' ', '')
        return text.strip() if strip else text

    @staticmethod
    def _joined(response, xpath):
        """Join all text nodes under *xpath* (inner spaces kept), strip ends."""
        return ''.join(response.xpath(xpath).extract()).strip()

    @staticmethod
    def _requirements(response):
        """'|'-joined text of the requirements ("resume") box."""
        return '|'.join(i.strip() for i in
                        response.xpath("//div[@class = 'resume clearfix']//text()").extract())

    # ---- detail-page parsing ------------------------------------------

    def parse_info(self, response):
        """Extract job and company information from a job-detail page.

        Yields one JobInfoHLJ item per page. The four layout variants share
        field structure but use different CSS class names, chosen below.
        """
        jobinfo = JobInfoHLJ()
        jobinfo['url'] = response.url
        jobinfo['insert_time'] = datetime.datetime.now()

        # BUG FIX: the "position closed" marker is a unicode string, so it
        # must be searched in the *decoded* page (response.text), not in the
        # raw bytes of response.body -- the bytes/unicode comparison raises
        # UnicodeDecodeError on Python 2 (implicit ascii decode of a UTF-8
        # page) and TypeError on Python 3.
        job_ended = u'该职位已结束' in response.text

        # Requirements box is identical in every layout.
        jobinfo['job_req'] = self._requirements(response)

        if 'job.liepin.com' in response.url:
            # Company-hosted page: location/update live in the same place
            # for both the live and the closed variant.
            jobinfo['job_location'] = self._joined(
                response, "//p[@class='basic-infor']/span[1]/text()")
            jobinfo['job_update'] = self._joined(
                response, "//p[@class='basic-infor']/span[2]/text()")
            if job_ended:
                # NOTE: the double spaces inside these class strings are
                # intentional -- they match the site's markup exactly.
                title_cls = 'title-info  over'
                main_cls = 'job-main main-message  over-main'
                com_cls = 'job-main main-message  over-main'
            else:
                title_cls = 'title-info '
                main_cls = 'job-main main-message '
                com_cls = 'job-main main-message noborder '
            jobinfo['job_name'] = response.xpath(
                "//div[@class='%s']/h1/text()" % title_cls).extract_first()
            jobinfo['job_info'] = self._squash(
                response, "//div[@class='%s'][1]//text()" % main_cls)
            jobinfo['job_detail'] = self._squash(
                response, "//div[@class='%s'][2]//text()" % main_cls, strip=False)
            jobinfo['job_com_info'] = self._squash(
                response, "//div[@class='%s']//text()" % com_cls)
            # Company name is a link (h3/a) on company-hosted pages.
            jobinfo['job_com_name'] = self._joined(
                response, "//div[@class='%s']/h3/a/text()" % title_cls)
        else:
            # Regular www.liepin.com page: location/update are taken as the
            # first raw text node (unstripped), matching original behaviour.
            jobinfo['job_location'] = response.xpath(
                "//p[@class='basic-infor']/span[1]/text()").extract_first()
            jobinfo['job_update'] = response.xpath(
                "//p[@class='basic-infor']/span[2]/text()").extract_first()
            if job_ended:
                title_cls = 'title-info over'
                main_cls = 'job-main job-main-left over-main'
                com_cls = 'job-main noborder job-main-left over-main'
            else:
                title_cls = 'title-info '
                main_cls = 'job-main main-message'
                com_cls = 'job-main noborder main-message'
            jobinfo['job_name'] = response.xpath(
                "//div[@class='%s']/h1/text()" % title_cls).extract_first()
            jobinfo['job_info'] = self._squash(
                response, "//div[@class='%s'][1]//text()" % main_cls)
            jobinfo['job_detail'] = self._squash(
                response, "//div[@class='%s'][2]//text()" % main_cls, strip=False)
            jobinfo['job_salary'] = self._squash(
                response, "//div[@class='%s'][3]//text()" % main_cls)
            # Benefits are scraped from the same box as salary on this layout.
            jobinfo['job_benefits'] = jobinfo['job_salary']
            jobinfo['job_com_info'] = self._squash(
                response, "//div[@class='%s']//text()" % com_cls)
            # Company name is plain h3 text (no link) on regular pages.
            jobinfo['job_com_name'] = self._joined(
                response, "//div[@class='%s']/h3/text()" % title_cls)
        yield jobinfo
