# -*- coding: utf-8 -*-
import copy
import re
import urllib
import urllib.parse

import scrapy

import employment.items as items


class BossSpider(scrapy.Spider):
    """Spider for job postings on zhipin.com (BOSS直聘).

    ``parse`` walks the listing page, extracts the fields available there,
    then follows each position's detail link; ``parse_content`` adds the
    fields only present on the detail page and yields the finished item.
    """

    name = 'boss'
    allowed_domains = ['www.zhipin.com']
    start_urls = ['http://www.zhipin.com/c101110100-p100901/']

    def parse(self, response):
        """Parse one search-result page and schedule detail-page requests.

        Yields one ``scrapy.Request`` per listed position; the partially
        filled item travels to ``parse_content`` via ``meta['item']``.
        """
        # All job postings on the current listing page.
        positionList = response.css(".job-list ul li")
        for pL in positionList:
            item = items.EmploymentItem()
            # [x] Position title
            item['positionName'] = pL.css(
                ".job-title .job-name a::text").extract_first()
            # [x] Salary range, e.g. "10-15k" -> min "10", max "15k"
            positionAward = pL.css(".job-limit .red::text").extract_first()
            matchObj = re.search(
                r'([0-9.]*?)-([0-9.]*?[kK])', str(positionAward))
            if matchObj is not None:
                item['positionAwardMin'] = matchObj.group(1)
                item['positionAwardMax'] = matchObj.group(2)
            else:
                # str() so a None value cannot turn this intended
                # RuntimeError into a TypeError while building the message.
                raise RuntimeError('error positionAward:' + str(positionAward))

            # [x] Required work experience

            experienceTime = pL.css(".job-limit p::text")[0].extract()
            if experienceTime.strip() == '经验不限':
                # min == max == -1 means "no experience requirement"
                item['experienceTimeMin'] = -1
                item['experienceTimeMax'] = -1
            elif experienceTime.strip() == '经验在校/应届':
                # min == max == 0 means "student / fresh graduate"
                item['experienceTimeMin'] = 0
                item['experienceTimeMax'] = 0
            else:
                matchObj = re.search(
                    r'([0-9.]*?)-([0-9.]*?)年', str(experienceTime))
                if matchObj is not None:
                    item['experienceTimeMin'] = int(matchObj.group(1))
                    item['experienceTimeMax'] = int(matchObj.group(2))
                else:
                    # Unrecognized experience text — fail loudly so the
                    # new site format gets noticed.
                    raise RuntimeError('新的岗位经验要求:' + experienceTime)

            # [x] Required education
            item['education'] = pL.css(".job-limit p::text")[1].extract()
            # [x] City the company is located in
            item['ltdCity'] = pL.css(
                ".job-title .job-area-wrapper .job-area::text").extract_first()
            # [x] Benefits / welfare description
            item['walfare'] = pL.css(
                ".info-append .info-desc::text").extract_first()
            # [x] Company name
            item['ltdName'] = pL.css(
                ".info-company .company-text .name a::text").extract_first()
            # Company scale is only available on the detail page
            # (filled in by parse_content).
            # [x] Absolute link to the position's detail page
            url = pL.css(
                ".job-title .job-name a::attr(href)").extract_first()
            item['positionLink'] = urllib.parse.urljoin(response.url, url)

            # [x] Absolute link to the company page
            url = pL.css(
                ".info-company .company-text .name a::attr(href)").extract_first()
            item['ltdLink'] = urllib.parse.urljoin(response.url, url)
            # Follow the detail page to finish the item; deepcopy so the
            # item is not shared/mutated across concurrent requests.
            yield scrapy.Request(
                item['positionLink'],
                callback=self.parse_content,
                meta={'item': copy.deepcopy(item)})

    def parse_content(self, response):
        """Parse the position detail page: posting time and company scale."""
        # BUG FIX: parse() sends the item under meta key 'item', but this
        # method used to read meta['data'], raising KeyError on every page.
        item = response.meta['item']
        # [x] Posting time
        positionTime = response.css(
            ".job-box .job-sider .sider-company .gray::text").extract_first()
        # Strip the leading CJK label and colon (e.g. "发布于:").
        item['positionTime'] = re.sub(
            '[\u4e00-\u9fa5]*:', "", str(positionTime))
        # [x] Company scale
        item['ltdScale'] = response.css(
            ".con .rightCon .item_con .comp_baseInfo_scale::text").extract_first()
        yield item
