# -*- coding: utf-8 -*-
import scrapy
import employment.secretConfig as secretConfig  # Holds passwords and other secret settings; this module is deliberately untracked by git — create it yourself next to settings.py and define the corresponding dicts.
import employment.items as items
import re
import datetime
import urllib
import time
import copy

class DajieSpider(scrapy.Spider):
    """Spider that scrapes java job postings from dajie.com.

    ``parse`` walks the search-result list, builds one ``EmploymentItem``
    per posting, then requests the posting's detail page;
    ``parse_detail`` attaches the welfare field and yields the item.

    Sentinel conventions used for numeric item fields:
      * 0/0  salary  -> "negotiable" ('面议'); 0/0 experience -> fresh graduate
      * -1   -> unknown / unbounded / unparsable
    """

    name = 'dajie'
    # Must be the registrable domain: detail pages live on job.dajie.com,
    # which the OffsiteMiddleware would drop under 'www.dajie.com'.
    allowed_domains = ['dajie.com']
    start_urls = ['https://so.dajie.com/job/search?keyword=java&from=job&clicktype=blank/']

    # Education requirement text -> ordinal level; unmapped values become -1
    # via ``dict.get``'s default.
    EDU_MAP = {
        '不限': 0,
        '大专': 2,
        '本科': 3,
        '硕士': 4,
        '博士': 5,
    }

    # Salary patterns, compiled once (hoisted out of the per-row loop).
    # Observed formats: '10K-15K/月', '150元/天', '15K/月', '面议'.
    _SALARY_RANGE_RE = re.compile(r'([0-9.]+)K-([0-9.]+)K/月')
    _SALARY_DAILY_RE = re.compile(r'([0-9.]+)元/天')
    _SALARY_SINGLE_RE = re.compile(r'([0-9.]+)K/月')
    # Experience pattern, e.g. '经验10年及以上'. Greedy '+' (not lazy '*?')
    # so an empty group can never be captured.
    _EXPERIENCE_MIN_RE = re.compile(r'([0-9.]+)年及以上')

    def parse(self, response):
        """Parse one search-result page; yield a detail request per job.

        @param response: the search-result page (Scrapy Response)
        """
        for row in response.css(".jobList ul>li"):
            item = items.EmploymentItem()

            # Position title.
            item['positionName'] = row.css("p a.jobName::text").extract_first()

            # Salary and experience requirement.
            self._fill_salary(item, row.css("p span.money::text").extract_first())
            self._fill_experience(item, row.css("p span.suffer::text").extract_first())

            # Education requirement; a missing node is treated as '不限'
            # (no requirement), matching the previous behaviour.
            edu = row.css("p span.edu::text").extract_first()
            if edu is None:
                edu = '不限'
            item['education'] = self.EDU_MAP.get(edu.strip(), -1)

            # Company city and name.
            item['ltdCity'] = row.css("p span.ads::text").extract_first()
            item['ltdName'] = row.css("div.companyMsg a.companyName::text").extract_first()

            # Company size: the text is '/'-separated; keep the last segment.
            # Guard against a missing node (old code crashed with
            # AttributeError on None).
            scale = row.css("div.companyMsg span.personNum::text").extract_first()
            item['ltdScale'] = scale.split('/')[-1].strip() if scale is not None else None

            # Company link, resolved against the current page URL.
            href = row.css("div.companyMsg a.companyName::attr(href)").extract_first()
            item['ltdLink'] = urllib.parse.urljoin(response.url, href) if href else None

            # Detail-page link. BUGFIX: the old code hard-coded one debug
            # URL here, so every detail request fetched the same page.
            href = row.css("div.boxCenter a.jobName::attr(href)").extract_first()
            if href:
                item['positionLink'] = urllib.parse.urljoin(response.url, href)
                # deepcopy so the in-flight request cannot observe later
                # mutations of this loop's item.
                yield scrapy.Request(item['positionLink'],
                                     callback=self.parse_detail,
                                     meta={'item': copy.deepcopy(item)})
            else:
                # No detail link: yield what we have, without welfare data.
                item['positionLink'] = None
                item['walfare'] = None
                yield item

        # TODO(pagination): next-page handling was disabled; re-add by
        # reading '#page-con.paging::attr(lastpage)' and the current page
        # number, then following '#page-con.paging a.next'.

    def _fill_salary(self, item, award):
        """Fill positionAwardMin/Max (floats) from the raw salary text.

        @param award: salary text such as '10K-15K/月', or None
        """
        text = str(award)
        range_match = self._SALARY_RANGE_RE.search(text)
        daily_match = self._SALARY_DAILY_RE.search(text)
        single_match = self._SALARY_SINGLE_RE.search(text)
        if award == '面议':
            # Negotiable: both bounds 0 by convention.
            item['positionAwardMin'] = 0
            item['positionAwardMax'] = 0
        elif range_match:
            item['positionAwardMin'] = float(range_match.group(1))
            item['positionAwardMax'] = float(range_match.group(2))
        elif daily_match:
            # BUGFIX: the old code did group(1)*30 on a *string*, yielding
            # the digits repeated 30 times instead of wage * 30 days.
            daily = float(daily_match.group(1))
            item['positionAwardMin'] = daily
            item['positionAwardMax'] = daily * 30
        elif single_match:
            # Single lower bound, e.g. '15K/月': max unknown.
            item['positionAwardMin'] = float(single_match.group(1))
            item['positionAwardMax'] = -1
        else:
            self.logger.debug('unrecognised salary text: %r', award)
            item['positionAwardMin'] = -1
            item['positionAwardMax'] = -1

    def _fill_experience(self, item, exp):
        """Fill experienceTimeMin/Max from the raw experience text.

        @param exp: text such as '经验10年及以上', '经验在校/应届', or None
        """
        if exp is None or exp == '不限':
            # Missing node or explicit "no requirement".
            item['experienceTimeMin'] = -1
            item['experienceTimeMax'] = -1
        elif exp == '经验在校/应届':
            # Fresh graduate / student position.
            item['experienceTimeMin'] = 0
            item['experienceTimeMax'] = 0
        elif (match := self._EXPERIENCE_MIN_RE.search(exp)) is not None:
            # Bounded below, unbounded above, e.g. '经验10年及以上'.
            item['experienceTimeMin'] = float(match.group(1))
            item['experienceTimeMax'] = -1
        else:
            item['experienceTimeMin'] = -1
            item['experienceTimeMax'] = -1

    def parse_detail(self, response):
        """Parse a job-detail page: attach welfare tags, yield the item.

        @param response: the detail page; ``response.meta['item']`` carries
            the partially filled item from ``parse``.
        """
        item = response.meta.get('item')
        # Collect *all* welfare tags joined by ',' (the old code kept only
        # the first tag; the commented-out join showed the real intent).
        tags = response.css("#jp_maskit > div.job-msg-bottom > ul li::text").extract()
        item['walfare'] = ','.join(t.strip() for t in tags) if tags else None
        yield item