import math
import re
from datetime import datetime, timedelta
from urllib import request

import scrapy

from spiderproject.items import ZhilianItem

class Liepin(scrapy.Spider):
    """Spider for liepin.com job postings.

    Crawl path: home page -> industry category links (``parse``) ->
    paginated listing pages (``page``) -> job links (``info_url``) ->
    job-detail pages (``parse_info``), yielding one ``ZhilianItem`` per
    posting.  The ``edit_*`` helpers normalize the raw Chinese field text.
    """

    name = 'liepin'
    # BUGFIX: was [""], which misconfigures the offsite filter; every
    # request this spider issues targets liepin.com.
    allowed_domains = ["liepin.com"]
    start_urls = ["https://www.liepin.com/"]
    custom_settings = {
        "DEFAULT_REQUEST_HEADERS": {
            # BUGFIX: every header value carried a spurious leading space
            # (e.g. " www.liepin.com"); values are now clean.
            "Host": "www.liepin.com",
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "zh-CN,zh;q=0.9",
            # NOTE(review): this hard-coded session cookie will expire;
            # consider moving it to settings or re-authenticating per run.
            "Cookie": "abtest=0; _fecdn_=1; __uuid=1530584315305.92; _uuid=5DEF3312FFA844180005C159D2471CAE; _mscid=s_00_000; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1530584316,1530584320,1530584326; slide_guide_home_new=1; ADHOC_MEMBERSHIP_CLIENT_ID1.0=b016f77d-056b-ebd0-a800-28c9593703e7; slide_guide_home=1; __tlog=1530584315306.26%7C00000000%7CR000000075%7Cs_00_pz0%7Cs_00_000; verifycode=87e304c79c87464383d5ebd19fc051b3; JSESSIONID=9E539D7466E2D424866FC344A539A39D; __session_seq=37; __uv_seq=37; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1530588441",
        },
        'CONCURRENT_REQUESTS': 20,
        'ITEM_PIPELINES': {
            'spiderproject.pipelines.SpiderprojectPipeline': 1,
        },
    }

    def parse(self, response):
        """Follow every industry category link on the home page."""
        hrefs = response.xpath(
            '//div[@class="industry-side industry-jobs"]//a/@href').extract()
        for href in hrefs:
            # response.urljoin is Scrapy's documented way to resolve
            # relative links (the old request.urljoin relied on a private
            # re-export inside urllib.request).
            yield scrapy.Request(response.urljoin(href),
                                 callback=self.page, dont_filter=True)

    def page(self, response):
        """Derive the listing-page URL template and request pages 1..max.

        The "last page" link looks like ``...curPage=N``; N is the page
        count and the trailing ``=N`` is replaced with a format slot.
        """
        last_links = response.xpath('//a[@class="last"]//@href').extract()
        if not last_links:
            # Single-page categories have no "last" link: nothing to paginate.
            return
        last_url = last_links[0]

        parts = last_url.split('=')
        parts[-1] = '{page}'
        base_url = '='.join(parts)

        pages = re.findall(r'curPage=(\d+)', last_url)
        if not pages:
            self.logger.warning('no curPage in last-page link: %s', last_url)
            return
        # BUGFIX: the original indexed [1], which raises IndexError when
        # the URL contains a single curPage parameter; take the last match.
        max_page = int(pages[-1])

        for p in range(1, max_page + 1):
            yield scrapy.Request(response.urljoin(base_url.format(page=p)),
                                 callback=self.info_url, dont_filter=True)

    def info_url(self, response):
        """Request every job-detail link on a listing page."""
        for href in response.xpath(
                '//div[@class="job-info"]/h3/a/@href').extract():
            yield scrapy.Request(response.urljoin(href),
                                 callback=self.parse_info, dont_filter=True)

    def parse_info(self, response):
        """Parse one job-detail page into a ZhilianItem.

        The site serves two page layouts, so each field probes both.
        Unparseable pages are logged and skipped — BUGFIX: the original
        bare ``except: pass`` silently hid every parse error, including
        NameErrors from fields left unbound when no layout matched.
        """
        item = ZhilianItem()
        try:
            # ---- title (two layouts) --------------------------------
            if 'title-info' in response.text:
                title = response.css('div.title-info h1::text').extract()[0]
            elif 'job-title' in response.text:
                title = response.css('div.job-title h1::text').extract()[0]
            else:
                self.logger.warning('no title layout matched: %s', response.url)
                return

            # ---- salary (two layouts) -------------------------------
            if 'job-item-title' in response.text:
                smoney, emoney = self.edit_gongzi(
                    response.css('p.job-item-title::text').extract()[0])
            elif 'job-main-title' in response.text:
                # Fall back to the <strong> variant when the <p> text
                # node is absent (mirrors the original try/except).
                texts = (response.css('p.job-main-title::text').extract() or
                         response.css('div.job-main-title strong::text').extract())
                smoney, emoney = self.edit_gongzi(texts[0])
            else:
                self.logger.warning('no salary layout matched: %s', response.url)
                return

            # ---- city -----------------------------------------------
            city_links = response.css('p.basic-infor span a::text').extract()
            if city_links:
                city = city_links[0]
            else:
                city = ''.join(
                    response.css('p.basic-infor span::text').extract()).strip()

            # ---- publish date ---------------------------------------
            if 'job-main-tip' in response.text:
                raw_time = response.xpath(
                    '//p[@class="job-main-tip"]//span[2]/text()').extract()[0]
            else:
                raw_time = response.css('p.basic-infor time::text').extract()[0]
            release_time = self.edit_date(raw_time)

            job_type = "无"

            # ---- experience / education -----------------------------
            # BUGFIX: defaults prevent NameError when neither layout block
            # is present on the page.
            syear = eyear = 0
            edu = '无'
            if 'resume clearfix' in response.text:
                span_xpath = '//div[@class="resume clearfix"]//span[%d]/text()'
            elif 'job-qualifications' in response.text:
                span_xpath = '//div[@class="job-qualifications"]//span[%d]/text()'
            else:
                span_xpath = None
            if span_xpath is not None:
                syear, eyear = self.edit_year(
                    response.xpath(span_xpath % 2).extract()[0])
                edu = self.edit_edu(
                    response.xpath(span_xpath % 1).extract()[0])

            content = ''.join(response.xpath(
                '//div[@class="content content-word"]//text()').extract())

            # ---- company address ------------------------------------
            # BUGFIX: str.strip("公司地址：") removes a *character set* from
            # both ends; remove the literal prefix instead.
            adds_nodes = response.xpath(
                '//ul[@class="new-compintro"]//li[3]/text()').extract()
            adds = adds_nodes[0].strip() if adds_nodes else ''
            prefix = '公司地址：'
            if adds.startswith(prefix):
                adds = adds[len(prefix):]

            label = '-'.join(response.xpath(
                '//div[@class="tag-list"]//span/text()').extract())

            item["url"] = response.url
            item["title"] = title
            item["smoney"] = smoney
            item["emoney"] = emoney
            item["city"] = city
            item["release_time"] = release_time
            item["job_type"] = job_type
            item["syear"] = syear
            item["eyear"] = eyear
            item["edu"] = edu
            item["groups"] = title
            item["content"] = content
            item["adds"] = adds
            item["label"] = label
            yield item
        except Exception:
            # Targeted replacement for the original silent `except: pass`.
            self.logger.exception('failed to parse job page %s', response.url)

    # Normalize salary text.
    def edit_gongzi(self, value):
        """Parse salary text into ``(start, end)`` monthly pay in yuan.

        '面议' (negotiable) maps to the sentinel pair (9999, 9999);
        ranges such as '10-15万' are annual 万-yuan figures converted to
        a monthly amount.  BUGFIX: any other format previously raised
        UnboundLocalError — it now returns (0, 0).
        """
        if '面议' in value:
            return 9999, 9999
        if '-' in value:
            nums = re.findall(r'\d+', value)
            if len(nums) >= 2:
                smoney = math.floor(int(nums[0]) * 10000 / 12)
                emoney = math.floor(int(nums[1]) * 10000 / 12)
                return smoney, emoney
        return 0, 0

    # Normalize relative date text.
    def edit_date(self, value):
        """Convert a relative Chinese date string to 'YYYY-MM-DD'.

        BUGFIX: the original built dates by string surgery, producing
        invalid values such as month 0 or day 0 near year/month
        boundaries; timedelta arithmetic is always valid (a month is
        approximated as 30 days).  Unrecognized text is returned as-is.
        """
        now = datetime.now()
        if '小时前' in value or '分钟前' in value or '刚刚' in value:
            return now.strftime('%Y-%m-%d')
        if '月前' in value:
            months = int(re.search(r'\d+', value).group())
            return (now - timedelta(days=30 * months)).strftime('%Y-%m-%d')
        if '昨天' in value:
            return (now - timedelta(days=1)).strftime('%Y-%m-%d')
        return value

    # Normalize work-experience text.
    def edit_year(self, value):
        """Parse experience text into ``(min_years, max_years)``.

        Handles ranges such as '3-5年' (the original collapsed both
        bounds to the first number).  '经验不限' or anything without a
        digit maps to (0, 0).
        """
        if '年' in value:
            nums = re.findall(r'\d+', value)
            if nums:
                return int(nums[0]), int(nums[-1])
        return 0, 0

    # Normalize education text.
    def edit_edu(self, value):
        """Normalize an education requirement string.

        BUGFIX: ``value.strip('及以上')`` stripped a character *set* from
        both ends; the literal suffix is now removed instead.
        """
        if '及以上' in value:
            return value.replace('及以上', '')
        if '学历不限' in value:
            return '无'
        return value