# -*- coding: utf-8 -*-
import scrapy
import employment.items as items
import re
import time
import copy
import urllib
class LiepinSpider(scrapy.Spider):
    """Crawl python job postings from liepin.com search results.

    ``parse`` walks one result page, builds an ``EmploymentItem`` per
    posting, and follows each posting link so ``parse_detail`` can add
    the company scale before the item reaches the pipeline.
    """
    name = 'liepin'
    allowed_domains = ['www.liepin.com']
    start_urls = ['https://www.liepin.com/zhaopin/?industries=&subIndustry=&dqs=&salary=&jobKind=&pubTime=&compkind=&compscale=&searchType=1&isAnalysis=&sortFlag=15&d_headId=8945cc0a712e455e427c5c7251123217&d_ckId=8945cc0a712e455e427c5c7251123217&d_sfrom=search_fp&d_curPage=0&d_pageSize=40&siTag=I-7rQ0e90mv8a37po7dV3Q%7EfA9rXquZc5IkJpXC-Ycixw&key=python']

    # Education requirement text -> numeric code stored in the item;
    # anything not listed maps to -1 (see parse()).
    EDU_MAP = {
        '不限': 0,
        '大专及以上': 2,
        '统招本科': 3,
        '本科及以上': 4,
    }

    # Hard upper bound on the number of result pages to crawl.
    MAX_PAGES = 10

    @staticmethod
    def _parse_award(item, positionAward):
        """Fill positionAwardMin/Max (unit: k RMB) from the salary text.

        '面议' (negotiable) is encoded as min == max == 0.  The trailing
        '·N薪' (months of salary per year) suffix is optional — plain
        '15-25k' listings are accepted too.
        Raises RuntimeError on an unrecognised format so new layouts
        surface instead of being silently dropped.
        """
        if positionAward == '面议':
            item['positionAwardMin'] = 0
            item['positionAwardMax'] = 0
            return
        m = re.search(r'([0-9.]+)-([0-9.]+)k(?:·([0-9.]+)薪)?', str(positionAward))
        if m is None:
            raise RuntimeError('positionAward:' + str(positionAward))
        # Numeric values for both branches (the 面议 branch stores ints),
        # instead of strings in one branch and ints in the other.
        item['positionAwardMin'] = float(m.group(1))
        item['positionAwardMax'] = float(m.group(2))

    @staticmethod
    def _parse_time(positionTime):
        """Normalise a 'YYYY年MM月DD日' title to 'YYYY-MM-DD'; None if unparseable."""
        m = re.search(r'([0-9]+)年([0-9]+)月([0-9]+)日', str(positionTime))
        if m is None:
            return None
        raw = m.group(1) + '-' + m.group(2) + '-' + m.group(3)
        # Round-trip through strptime/strftime to validate and normalise.
        return time.strftime('%Y-%m-%d', time.strptime(raw, '%Y-%m-%d'))

    @staticmethod
    def _parse_experience(item, exp):
        """Map the experience-requirement text onto experienceTimeMin/Max (years).

        Encoding: min == max == -1 -> no requirement; min == max == 0 ->
        students / fresh graduates; -1 on one side only -> open-ended range.
        Raises RuntimeError for unrecognised text so new formats surface.
        """
        exp = exp.strip()
        if exp == '经验不限':
            item['experienceTimeMin'] = -1
            item['experienceTimeMax'] = -1
        elif exp == '经验在校/应届':
            item['experienceTimeMin'] = 0
            item['experienceTimeMax'] = 0
        elif (m := re.search(r'([0-9]+)-([0-9]+)年', exp)) is not None:
            # e.g. '经验3-5年'
            item['experienceTimeMin'] = int(m.group(1))
            item['experienceTimeMax'] = int(m.group(2))
        elif (m := re.search(r'([0-9]+)年以上', exp)) is not None:
            # e.g. '经验10年以上' -> open upper bound
            item['experienceTimeMin'] = int(m.group(1))
            item['experienceTimeMax'] = -1
        elif (m := re.search(r'([0-9]+)年以下', exp)) is not None:
            # Rare 'at most N years' listings -> open lower bound.
            item['experienceTimeMin'] = -1
            item['experienceTimeMax'] = int(m.group(1))
        else:
            raise RuntimeError('新的岗位经验要求:' + exp)

    def parse(self, response):
        """Parse one search-result page: yield a detail Request per posting,
        then a Request for the next result page (bounded by MAX_PAGES)."""
        for pLL in response.css("div.sojob-result ul.sojob-list li"):
            item = items.EmploymentItem()

            # Position title.
            positionName = pLL.css("a::text").extract_first()
            item['positionName'] = positionName.strip()

            # Salary range.
            self._parse_award(item, pLL.css("p .text-warning::text").extract_first())

            # Publish date, normalised to YYYY-MM-DD (None when missing).
            item['positionTime'] = self._parse_time(
                pLL.css("time::attr(title)").extract_first())

            # The <p title="..."> attribute packs several fields joined by
            # '_': salary_city_education_experience (observed layout —
            # TODO confirm against live pages if the site changes).
            parts = pLL.css("p::attr(title)").extract_first().split('_')

            # Required work experience.
            self._parse_experience(item, parts[3])

            # Company city: 'city-district' is stored as 'city.district'.
            ltdCity = parts[1]
            if '-' in ltdCity:
                segs = ltdCity.split('-')
                item['ltdCity'] = segs[0] + '.' + segs[1]
            else:
                item['ltdCity'] = ltdCity

            # Education requirement (-1 when the text is not in EDU_MAP).
            item['education'] = self.EDU_MAP.get(parts[2].strip(), -1)

            # Welfare / perks, space-joined.
            item['walfare'] = ' '.join(pLL.css("p.temptation span::text").extract())

            # Company name and links.
            item['ltdName'] = pLL.css("p.company-name a::text").extract_first()
            item['positionLink'] = pLL.css("h3 a::attr(href)").extract_first()
            item['ltdLink'] = pLL.css("p.company-name a::attr(href)").extract_first()

            # deepcopy so concurrently-running detail callbacks never
            # share (and overwrite) one item instance.
            yield scrapy.Request(item['positionLink'], callback=self.parse_detail,
                                 meta={'item': copy.deepcopy(item)})

        # Pagination: follow the actual "next page" link.  (Re-requesting
        # response.url with dont_filter=True, as before, fetched the same
        # page forever.)
        curNum = response.css('div.pager .pagerbar a.current::text').extract_first()
        if curNum is None:
            # Pager not present (e.g. single page of results) — stop here.
            return
        if int(curNum) + 1 <= self.MAX_PAGES:
            next_href = response.css(
                "div.pager .pagerbar a:nth-last-child(2)::attr(href)").extract_first()
            if next_href:
                yield scrapy.Request(response.urljoin(next_href),
                                     callback=self.parse, dont_filter=True)

    def parse_detail(self, response):
        """Add the company scale from the job-detail page, then emit the item."""
        item = response.meta.get('item')
        ltdScale = response.css(
            "#job-view-enterprise > div.wrap.clearfix > div.clearfix > div.side > div:nth-child(2) > div > div.company-infor > div > ul.new-compintro > li:nth-child(2)::text").extract_first()
        # Text looks like '公司规模：<value>'; keep the value part only, and
        # tolerate a missing element instead of crashing the callback.
        if ltdScale and '：' in ltdScale:
            item['ltdScale'] = ltdScale.split('：')[1]
        else:
            item['ltdScale'] = ltdScale
        yield item

    def nextPage(self, url, pageno):
        """Return a POST FormRequest for result page *pageno* of *url*.

        @param url: URL of the current page
        @param pageno: target page number
        """
        return scrapy.http.FormRequest(url, formdata={
            'pn': str(pageno),
            'kd': '',
            'first': 'true',
        }, callback=self.parse)