# -*- coding: utf-8 -*-
import scrapy
from .. import items
from .. import get_num  # helper library for extracting numbers from text
import datetime
import re


class LiepinSpider(scrapy.Spider):
    """Spider for zhaopin.com job listings via its fe-api search endpoint.

    Flow: parse() reads the total hit count from the first search response
    and schedules one listing-page request per 60-result offset;
    parse_list() extracts position detail URLs from each listing page;
    parse_info() scrapes one job posting into a ZhilianItem.
    """

    # NOTE(review): spider name is 'zhilian' but the class is LiepinSpider —
    # likely copied from a Liepin spider; consider renaming the class.
    name = 'zhilian'
    allowed_domains = ['zhaopin.com']
    start_urls = ['https://fe-api.zhaopin.com/c/i/sou']
    base_url = 'https://fe-api.zhaopin.com/c/i/sou?start=%d'

    # Results per listing page; also the step used to build paging offsets.
    PAGE_SIZE = 60
    # Safety cap on total offsets, matching the original hard-coded bound.
    MAX_OFFSET = 6000

    custom_settings = {
        # 'DOWNLOAD_DELAY': 1,
        'CONCURRENT_REQUESTS': 32,
        'DOWNLOADER_MIDDLEWARES': {
            # Lower numbers run first on requests, last on responses.
            # 'cprojectzhilian.mymiddlewares.RandomUserAgent': 998,
            # 'cprojectzhilian.mymiddlewares.RandomProxyMysql': 999,
        },
        'DEFAULT_REQUEST_HEADERS': {
            "User-Agent": " Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.87 Safari/537.36",
            "Cookie": "urlfrom=121113803; urlfrom2=121113803; adfbid=0; adfbid2=0; adfcid=pzzhubiaoti1; adfcid2=pzzhubiaoti1; dywea=95841923.1286720105476607500.1530596996.1530596996.1530596996.1; dywec=95841923; dywez=95841923.1530596996.1.1.dywecsr=other|dyweccn=121113803|dywecmd=cnt|dywectr=%E6%99%BA%E8%81%94%E6%8B%9B%E8%81%98; Hm_lvt_38ba284938d5eddca645bb5e02a02006=1530596996; __utma=269921210.1466604440.1530597001.1530597001.1530597001.1; __utmc=269921210; __utmz=269921210.1530597001.1.1.utmcsr=other|utmccn=121113803|utmcmd=cnt|utmctr=%E6%99%BA%E8%81%94%E6%8B%9B%E8%81%98; __xsptplus30=30.1.1530597003.1530597003.1%231%7Cother%7Ccnt%7C121113803%7C%7C%23%23WIFVpL0ao5g4c4sK9WiBqHnFVzyebLvF%23; ZP_OLD_FLAG=false; sts_deviceid=1645eb10ea844b-0cab00d14b0b9b-5b183a13-1049088-1645eb10ea933; sts_sg=1; zp_src_url=http%3A%2F%2Fts.zhaopin.com%2Fjump%2Findex_new.html%3Futm_source%3Dother%26utm_medium%3Dcnt%26utm_term%3D%26utm_campaign%3D121113803%26utm_provider%3Dzp%26sid%3D121113803%26site%3Dpzzhubiaoti1; sts_sid=1645ee6436a5af-06f4ac28707d34-5b183a13-1049088-1645ee6436cb56; LastCity=%E6%B7%B1%E5%9C%B3; LastCity%5Fid=765; GUID=12db32ce6b96496caca2d7746eda5e1f; ZL_REPORT_GLOBAL={%22sou%22:{%22actionIdFromSou%22:%22ad482f76-b4e0-4641-bf70-1b00a0be776c-sou%22%2C%22funczone%22:%22smart_matching%22}}; Hm_lpvt_38ba284938d5eddca645bb5e02a02006=1530601904; sts_evtseq=15",

        },
        'RETRY_TIMES': 1000,  # downloader retry count
        'DOWNLOAD_TIMEOUT': 5  # request times out after 5 seconds
    }

    def parse(self, response):
        """Read the total hit count and schedule one request per listing page.

        Fix: the original extracted "numFound" but then ignored it and always
        requested offsets 0..6000; the real count now drives the loop, still
        capped at the original 6000-offset bound as a safety limit.
        """
        data = response.text
        # "numFound" is the total number of matching positions in the JSON body.
        page_total_pat = re.compile(r'"numFound":(.+?),')
        matches = page_total_pat.findall(data)
        try:
            total = int(matches[0])
        except (IndexError, ValueError):
            # Count missing or malformed — fall back to the old fixed range.
            total = self.MAX_OFFSET
        for offset in range(0, min(total, self.MAX_OFFSET), self.PAGE_SIZE):
            fullurl = self.base_url % offset
            yield scrapy.Request(fullurl, callback=self.parse_list)

    def parse_list(self, response):
        """Extract position detail URLs from one listing page and follow them."""
        if response.status == 200:
            data = response.text
            # Detail-page URLs appear as "positionURL":"..." in the JSON body.
            info_url_pat = re.compile(r'"positionURL":"(.+?)"', re.S)
            for info_url in info_url_pat.findall(data):
                yield scrapy.Request(info_url, callback=self.parse_info)
        else:
            # Fix: use the spider's logger instead of print() so the message
            # goes through Scrapy's logging pipeline (same message text).
            self.logger.warning('你访问的页面不存在')

    def parse_info(self, response):
        """Scrape one job-posting page into a ZhilianItem.

        NOTE(review): every .extract()[0] raises IndexError if the page layout
        changes; Scrapy will log the failure and drop the page, which matches
        the original behavior.
        """
        item = items.ZhilianItem()

        # Source URL of the posting.
        url = response.url

        # Job title, e.g. <h1 class="l info-h3">区域销售代表（文具行业）</h1>
        positon = response.xpath('//h1/text()').extract()[0]

        # Salary; strip the non-breaking space the site embeds.
        salary = response.xpath('//ul[@class="terminal-ul clearfix"]/li[1]/strong/text()').extract()[0].replace('\xa0', '')

        # City of the position.
        location = response.xpath('//ul[@class="terminal-ul clearfix"]/li[2]/strong/a/text()').extract()[0]

        # Required years of work experience.
        years = response.xpath('//ul[@class="terminal-ul clearfix"]/li[5]/strong/text()').extract()[0]

        # Required education level.
        degree = response.xpath('//ul[@class="terminal-ul clearfix"]/li[6]/strong/text()').extract()[0]

        # Publication date, normalized via the project's number-extraction helper.
        date_pub = response.xpath('//ul[@class="terminal-ul clearfix"]/li[3]/strong/span/text()').extract()[0]
        date_pub = get_num.get_time(date_pub)

        # Job description: full text of the tab pane, minus boilerplate labels
        # ("查看职位地图" = "view job map", "工作地址:" = "work address:").
        jobdesc_data = response.xpath('//div[@class="tab-inner-cont"]')
        jobdesc = jobdesc_data.xpath('string(.)').extract()[0].replace('查看职位地图', '').replace('工作地址:', '').replace('\n', '').replace(' ', '')

        # Street address of the workplace.
        jobaddr = response.xpath('//div[@class="tab-inner-cont"]/h2/text()').extract()[0]

        # Company name.
        company = response.xpath('//h2/text()').extract()[0]

        # Date this record was crawled (YYYY-MM-DD).
        crawl_time = datetime.datetime.now().strftime('%Y-%m-%d')

        # Which spider produced this item.
        spider = self.name

        # NOTE(review): 'positon' (sic) must match the field name declared in
        # items.ZhilianItem — renaming it here would break the item class.
        item['url'] = url
        item['positon'] = positon
        item['salary'] = salary
        item['location'] = location
        item['years'] = years
        item['degree'] = degree
        item['date_pub'] = date_pub
        item['jobdesc'] = jobdesc
        item['jobaddr'] = jobaddr
        item['company'] = company
        item['crawl_time'] = crawl_time
        item['spider'] = spider

        yield item





