# -*- coding: utf-8 -*-
import datetime
import re
import time
import urllib
import urllib.parse

import scrapy

import employment.items as items


class LagouSpider(scrapy.Spider):
    """Crawl lagou.com job listings for a fixed set of tech industries.

    Each listing card on the results page is turned into an
    ``items.EmploymentItem`` with normalized salary, post date,
    experience and education fields.
    """

    name = 'lagou'
    allowed_domains = ['www.lagou.com']
    # Industry tags encoded in the query string: data services, information
    # security, AI, blockchain, IoT, VR|AR, software development,
    # telecom/electronics.
    start_urls = ['https://www.lagou.com/jobs/list_/p-city_0?px=new&hy=%E6%95%B0%E6%8D%AE%E6%9C%8D%E5%8A%A1,%E4%BF%A1%E6%81%AF%E5%AE%89%E5%85%A8,%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD,%E5%8C%BA%E5%9D%97%E9%93%BE,%E7%89%A9%E8%81%94%E7%BD%91,VR%E4%B8%A8AR,%E8%BD%AF%E4%BB%B6%E5%BC%80%E5%8F%91,%E9%80%9A%E8%AE%AF%E7%94%B5%E5%AD%90&isShowMoreIndustryField=true#filterBox']

    # NOTE: a cookie-based start_requests() used to live here; it was removed
    # because this listing page is reachable without logging in.

    # Education label -> numeric code used downstream; hoisted to a class
    # constant so the dict is not rebuilt for every job card.
    EDU_MAP = {
        '不限': 0,
        '大专': 2,
        '本科': 3,
        '硕士': 4,
        '博士': 5,
    }

    def parse(self, response):
        """Extract every job card on the listing page and yield one item each.

        @param response: the listing-page response
        Raises RuntimeError when a card has an unrecognized salary or
        experience label, or when the pagination header is missing.
        """
        position_list = response.css("#s_position_list ul.item_con_list>li")
        self.logger.info('crawling %d positions from %s',
                         len(position_list), response.url)

        for position in position_list:
            item = items.EmploymentItem()

            # Position name.
            item['positionName'] = position.css(
                "::attr(data-positionname)").extract_first().strip()

            # Salary, e.g. '10k-20k' -> min/max (kept as strings, as before).
            salary = position.css("::attr(data-salary)").extract_first()
            match = re.search(r'([0-9.]*?)k-([0-9.]*?)k', str(salary))
            if match is None:
                # The original concatenated type() into the message, which
                # itself raised TypeError; build the message safely instead.
                raise RuntimeError(
                    f'type of positionAward: {type(salary).__name__} {salary}')
            item['positionAwardMin'] = match.group(1)
            item['positionAwardMax'] = match.group(2)

            # Post time comes in three formats: '13:25发布' (posted today),
            # 'N天前发布' (N days ago), or a plain date '2020-11-10' once the
            # listing is more than three days old.
            raw_time = position.css(
                "span.format-time::text").extract_first().strip()
            item['positionTime'] = self._normalize_post_date(raw_time)

            # Experience and education share one element, surrounded by
            # noise text nodes such as bare '\n'.
            exp, edu = self._extract_exp_edu(position)
            self.logger.debug('experience raw: %s', exp)

            item['experienceTimeMin'], item['experienceTimeMax'] = \
                self._parse_experience(exp)

            # Education requirement; -1 when the label is unknown.
            item['education'] = self.EDU_MAP.get(edu.strip(), -1)

            # Company city.
            item['ltdCity'] = position.css(
                ".list_item_top .position .add em::text").extract_first().strip()

            # Welfare blurb; the page wraps it in Chinese quotation marks.
            item['walfare'] = position.css(
                ".list_item_bot div.li_b_r::text").extract_first().strip('“”')

            # Company name.
            item['ltdName'] = position.css(
                "::attr(data-company)").extract_first().strip()

            # Company scale is the last '/'-separated token of the industry line.
            item['ltdScale'] = position.css(
                ".list_item_top div.industry::text").extract_first().split('/')[-1].strip()

            # Absolute links to the position detail page and the company page.
            url = position.css(
                ".list_item_top .position .position_link::attr(href)").extract_first().strip()
            item['positionLink'] = urllib.parse.urljoin(response.url, url)
            url = position.css(
                ".list_item_top div.company_name a::attr(href)").extract_first().strip()
            item['ltdLink'] = urllib.parse.urljoin(response.url, url)

            yield item

        # Pagination: read current and total page numbers from the header.
        cur_num = response.css(
            '#positionHead .page-number .curNum::text').extract_first()
        total_num = response.css(
            '#positionHead .page-number .totalNum::text').extract_first()
        if cur_num is None or total_num is None:
            raise RuntimeError('无法获取页数')

        cur_num = int(cur_num)
        total_num = int(total_num)

        if cur_num + 1 <= total_num:
            self.logger.info('finished page %d of %d', cur_num, total_num)
            # NOTE(review): this re-requests the *same* URL (dont_filter=True)
            # and only stashes a CSS selector in meta — nothing ever advances
            # the page number, so this re-crawls page 1. Presumably
            # nextPage() below was meant to be used here; behavior kept
            # as-is — confirm intent before changing.
            next_page_css = ".pager_container span[action=next]"
            yield scrapy.Request(response.url, callback=self.parse, meta={
                'next_page_css': next_page_css,
            }, dont_filter=True)  # dont_filter: same URL would be deduped otherwise

    @staticmethod
    def _normalize_post_date(raw_time):
        """Normalize a lagou post-time string to 'YYYY-MM-DD'.

        '13:25发布'  -> today's date
        'N天前发布'  -> today minus N days
        '2020-11-10' -> returned unchanged
        """
        today = datetime.date.today()
        if ':' in raw_time:
            # Time-of-day format means the post went up today.
            return today.strftime("%Y-%m-%d")
        match = re.search(r'(\d+)天前', raw_time)  # \d+: handles >9 days too
        if match is not None:
            posted = today - datetime.timedelta(days=int(match.group(1)))
            return posted.strftime("%Y-%m-%d")
        # Already a plain date string.
        return raw_time

    @staticmethod
    def _extract_exp_edu(position):
        """Return (experience, education) text from one job-card selector.

        Both default to '' when no non-blank text node is found.
        """
        exp = ''
        edu = ''
        for exp_edu in position.css(
                ".list_item_top div.p_bot>div.li_b_l::text").extract():
            exp_edu = exp_edu.strip()
            if exp_edu:
                parts = exp_edu.split('/')
                if len(parts) == 3:
                    # e.g. '经验在校/应届 / 本科': the first two tokens form
                    # the experience label, the last one is the education.
                    exp = parts[0].strip() + '/' + parts[1].strip()
                else:
                    exp = parts[0].strip()
                edu = parts[-1].strip()  # education is always the last token
                break
        return exp, edu

    @staticmethod
    def _parse_experience(exp):
        """Parse an experience label into ``(min_years, max_years)`` ints.

        (-1, -1) means no requirement; (0, 0) means student/fresh graduate;
        -1 on one side means that bound is open.
        Raises RuntimeError on an unrecognized label.
        """
        exp = exp.strip()
        if exp == '经验不限':
            return -1, -1
        if exp == '经验在校/应届':
            return 0, 0
        if (m := re.search(r'经验([0-9.]*?)-([0-9.]*?)年', exp)) is not None:
            # e.g. '经验3-5年'
            return int(m.group(1)), int(m.group(2))
        if (m := re.search(r'经验([0-9.]*?)年以上', exp)) is not None:
            # e.g. '经验10年以上'; int() for type consistency with the other
            # branches (the original left this one as a str).
            return int(m.group(1)), -1
        if (m := re.search(r'经验([0-9.]*?)年以下', exp)) is not None:
            # Rare, but some postings cap the experience.
            return -1, int(m.group(1))
        raise RuntimeError('新的岗位经验要求:' + exp)

    def nextPage(self, url, pageno):
        """Build the FormRequest for page *pageno* of the listing.

        @param url: URL of the current page
        @param pageno: 1-based page number to fetch
        """
        return scrapy.http.FormRequest(url, formdata={
            'pn': str(pageno),
            'kd': '',
            'first': 'true',
        }, callback=self.parse)
