# -*- coding: utf-8 -*-
# stdlib
import copy
import datetime
import re
import time
import urllib
import urllib.parse  # bare `import urllib` does not load the .parse submodule used below

# third-party
import scrapy

# project
import employment.items as items


class ZhaopinSpider(scrapy.Spider):
    '''
    智联招聘 (zhaopin.com) spider for Java job listings.

    `parse` walks search-result pages, building one EmploymentItem per
    position and following its detail link; `parse_detail` adds the post
    date and welfare text, then yields the finished item.
    '''
    name = 'zhaopin'
    allowed_domains = ['sou.zhaopin.com', 'jobs.zhaopin.com']
    start_urls = [
        'https://sou.zhaopin.com/?jl=854&in=100030000,100040000,100050000,100100000&kw=Java%E5%BC%80%E5%8F%91&kt=3']

    # Education requirement -> ordinal rank. Hoisted to class level: it is
    # constant, so rebuilding it per position inside the loop was wasted work.
    # -1 (the .get default) means "unrecognized value".
    EDU_MAP = {
        '学历不限': 0,
        '初中及以下': 1,
        '中专/中技': 1,
        '高中': 1,
        '大专': 2,
        '本科': 3,
        '硕士': 4,
        '博士': 5,
        'MBA/EMBA': 6,
    }

    def parse(self, response):
        '''Parse one search-result page.

        Yields a detail-page Request per position (item carried in meta),
        then a Request for the next result page.
        '''
        position_list = response.css(
            'div#listContent>div.contentpile__content__wrapper')
        self.logger.info('解析得到%d个职位', len(position_list))

        for position in position_list:
            item = items.EmploymentItem()
            # [x] 职位名称 (position title)
            item['positionName'] = position.css(
                "a.contentpile__content__wrapper__item__info::attr(title)").extract_first().strip()

            # [x] 职位薪酬 (salary, min/max in units of 1k; 0/0 = negotiable)
            salary_text = position.css(
                "p.contentpile__content__wrapper__item__info__box__job__saray::text").extract_first().strip()
            item['positionAwardMin'], item['positionAwardMax'] = \
                self._parse_salary(salary_text)

            # The <li> demand list holds [city, experience, education];
            # extract it once instead of three separate CSS queries.
            demand = [t.strip() for t in position.css(
                "ul.contentpile__content__wrapper__item__info__box__job__demand>li::text").extract()]
            # [x] 公司所在城市 (city)
            item['ltdCity'] = demand[0]
            # [x] 工作经验要求 (experience requirement)
            item['experienceTimeMin'], item['experienceTimeMax'] = \
                self._parse_experience(demand[1])
            # [x] 学历要求 (education requirement)
            item['education'] = self.EDU_MAP.get(demand[2], -1)

            # [x] 公司名称 (company name)
            item['ltdName'] = position.css(
                ".commpanyName a::text").extract_first().strip()
            # [x] 公司规模 (company scale)
            item['ltdScale'] = position.css(
                "div.contentpile__content__wrapper__item__info__box__job__comdec>span::text").extract()[-1].strip()

            # [x] 职位详情链接 / 公司链接 — make relative hrefs absolute.
            url = position.css(
                'a.contentpile__content__wrapper__item__info::attr(href)').extract_first().strip()
            item['positionLink'] = urllib.parse.urljoin(response.url, url)
            url = position.css(
                ".commpanyName a::attr(href)").extract_first().strip()
            item['ltdLink'] = urllib.parse.urljoin(response.url, url)

            # Finish the item on the detail page. Deep-copy so concurrent
            # callbacks never share one mutable item.
            yield scrapy.Request(item['positionLink'],
                                 callback=self.parse_detail,
                                 meta={'item': copy.deepcopy(item)})

        # 翻页 (pagination). BUG FIX: the old code re-requested response.url
        # itself with dont_filter=True — an infinite loop on the same page
        # (its `next_page_css` was computed but never used). Advance the `p`
        # query parameter instead, and stop once a page yields no positions.
        # NOTE(review): assumes sou.zhaopin.com pages via `p=N` — confirm.
        if position_list:
            next_page = response.meta.get('page', 1) + 1
            parts = urllib.parse.urlparse(response.url)
            query = urllib.parse.parse_qs(parts.query)
            query['p'] = [str(next_page)]
            next_url = parts._replace(
                query=urllib.parse.urlencode(query, doseq=True)).geturl()
            yield scrapy.Request(next_url, callback=self.parse,
                                 meta={'page': next_page},
                                 dont_filter=True)  # 注意防止过滤重复请求

    @staticmethod
    def _parse_salary(text):
        '''Return (min, max) salary as numbers, in units of 1k.

        (0, 0) encodes 薪资面议 (negotiable).  Raises RuntimeError on an
        unrecognized format so new site formats surface immediately.
        '''
        if text == '薪资面议':
            # 都设置为0代表面议 (both 0 = negotiable)
            return 0, 0
        # `+` instead of the old lazy `*?`: an empty match would otherwise
        # produce '' and break numeric conversion downstream.
        matched = re.search(r'([0-9.]+)[kK]-([0-9.]+)[kK]', text)
        if matched is None:
            raise RuntimeError('新的薪酬格式:' + text)
        # Convert to numbers — the old code left these as strings while the
        # negotiable branch stored ints, giving the item inconsistent types.
        return float(matched.group(1)), float(matched.group(2))

    @staticmethod
    def _parse_experience(text):
        '''Return (min, max) required experience in years.

        (-1, -1) = unrestricted; (0, 0) = fresh graduate / no experience;
        -1 in a single slot = that bound is open.  Raises RuntimeError on
        an unrecognized format.
        '''
        if text == '经验不限':
            # 若min与max都为-1，则代表经验不限
            return -1, -1
        if text == '无经验':
            # 若min与max都为0，则代表为应届生，无经验
            return 0, 0
        if (m := re.search(r'([0-9]+)-([0-9]+)年', text)) is not None:
            # 例： '经验3-5年'
            return int(m.group(1)), int(m.group(2))
        if (m := re.search(r'([0-9]+)年以上', text)) is not None:
            # 例： '经验10年以上' — no upper bound (max = -1).
            # Old code stored group(1) as a string here; convert to int
            # for consistency with the range branch.
            return int(m.group(1)), -1
        if (m := re.search(r'([0-9]+)年以下', text)) is not None:
            # No lower bound (min = -1).
            return -1, int(m.group(1))
        raise RuntimeError('新的岗位经验要求:' + text)

    def parse_detail(self, response):
        '''Fill in post date and welfare from the detail page, then yield
        the completed item (carried in response.meta).'''
        item = response.meta.get('item')
        # [x] 发布时间 — formats seen: '13:25' (today) or 'M月D日'.
        raw_time = response.css(
            ".summary-plane__top .summary-plane__time::text").extract_first().strip()
        # BUG FIX: the old `.strip("更新于 ")` strips any of those CHARACTERS
        # from both ends of the string; remove the literal prefix instead.
        position_time = re.sub(r'^更新于\s*', '', raw_time)

        today = datetime.date.today()
        if ':' in position_time:
            # e.g. '13:25' — posted today.
            item['positionTime'] = today.strftime("%Y-%m-%d")
        elif (m := re.search(r'(\d+)月(\d+)日', position_time)) is not None:
            # NOTE(review): assumes the post is from the current year — a
            # December post viewed in January gets the wrong year; confirm.
            post_date = datetime.date(
                today.year, int(m.group(1)), int(m.group(2)))
            item['positionTime'] = post_date.strftime("%Y-%m-%d")
        else:
            raise RuntimeError('新的时间格式:' + position_time)

        # [x] 福利待遇 (welfare tags, space-joined). 'walfare' (sic) is the
        # field name declared in employment.items — keep the spelling.
        item['walfare'] = ' '.join(response.css(
            ".highlights span.highlights__content-item::text").extract())
        self.logger.debug('item: %r', item)
        yield item
