# -*- coding: utf-8 -*-
import scrapy
import scrapy
from .. import items
from .. import get_num # 提取数字的库
import datetime
import re

class LiepinSpider(scrapy.Spider):
    """Spider for job postings on liepin.com published within the last 7 days.

    Crawls listing pages, follows each job-detail link, and yields one
    ``items.LiepinItem`` per posting.
    """

    name = 'liepin'
    allowed_domains = ['liepin.com']
    start_urls = ['https://www.liepin.com/zhaopin/?pubTime=7']

    custom_settings = {
        'DOWNLOAD_DELAY': 0.5,
        'CONCURRENT_REQUESTS': 1,
        'DOWNLOADER_MIDDLEWARES': {
            # Lower order runs earlier on requests; higher order runs
            # earlier on responses.
            'cprojectliepin.mymiddlewares.RandomUserAgent': 998,
            # 'cprojectboss.mymiddlewares.RandomProxyMysql': 999,
        },
        'DEFAULT_REQUEST_HEADERS': {
            "Cookie": " abtest=0; _fecdn_=1; __uuid=1530545238732.36; __tlog=1530545238732.30%7C00000000%7C00000000%7Cs_00_pz0%7Cs_00_pz0; _mscid=s_00_pz0; _uuid=5B9BFD28FFA842183324CA967F773C16; Hm_lvt_a2647413544f5a04f00da7eee0d5e200=1530545242; slide_guide_home_new=1; slide_guide_home=1; verifycode=da374f7141be4a90aa77ceda1f37f2c2; JSESSIONID=01CCABAFC352577C89F50F5ABE40BB8A; ADHOC_MEMBERSHIP_CLIENT_ID1.0=169e55df-1ef9-eb88-5360-82dcb1abf239; firsIn=1; __session_seq=19; __uv_seq=19; Hm_lpvt_a2647413544f5a04f00da7eee0d5e200=1530545849"
        },
        'RETRY_TIMES': 1000,   # downloader retry count
        'DOWNLOAD_TIMEOUT': 5  # request times out after 5 seconds
    }

    def parse(self, response):
        """Parse a listing page: follow every job-detail link, then paginate.

        :param response: listing-page response.
        :yields: requests for job-detail pages and the next listing page.
        """
        for infourl in response.xpath('//div[@class="job-info"]/h3/a'):
            yield response.follow(infourl, self.parse_info)

        # BUG FIX: the original prefixed the domain BEFORE checking for
        # None, so the last page (no "next" link) raised
        # ``TypeError: can only concatenate str ... NoneType``.
        next_page = response.xpath('//a[contains(text(),"下一页")]/@href').extract_first()
        if next_page:
            yield scrapy.Request('https://www.liepin.com' + next_page,
                                 callback=self.parse)

    def parse_info(self, response):
        """Parse a job-detail page into a ``LiepinItem``.

        Every field uses ``extract_first(default='')`` so a single missing
        node degrades to an empty string instead of raising ``IndexError``
        and losing the whole item (the original only guarded ``location``).

        :param response: job-detail-page response.
        :yields: one populated ``items.LiepinItem``.
        """
        item = items.LiepinItem()

        item['url'] = response.url
        # Job title. (Item key keeps the project's existing spelling.)
        item['positon'] = response.xpath('//h1/text()').extract_first(default='')
        # Salary.
        item['salary'] = response.xpath(
            '//p[@class="job-item-title"]/text()').extract_first(default='')
        # Location: first entry of the breadcrumb, empty when absent.
        item['location'] = response.xpath(
            '//p[@class="basic-infor"]/span/a/text()').extract_first(default='')
        # Required working experience.
        item['years'] = response.xpath(
            '//div[@class="job-qualifications"]/span[2]/text()').extract_first(default='')
        # Required education.
        item['degree'] = response.xpath(
            '//div[@class="job-qualifications"]/span[1]/text()').extract_first(default='')
        # Publish date, normalized by the project helper.
        # NOTE(review): assumes get_num.get_time tolerates '' — TODO confirm.
        date_pub = response.xpath('//time/@title').extract_first(default='')
        item['date_pub'] = get_num.get_time(date_pub)
        # Full job description: all text under the content node.
        item['jobdesc'] = response.xpath(
            '//div[@class="content content-word"]').xpath('string(.)').extract_first(default='')
        # Work address.
        item['jobaddr'] = response.xpath(
            '//ul[@class="new-compintro"]/li[last()]/text()').extract_first(default='')
        # Company name.
        item['company'] = response.xpath('//h3/a/text()').extract_first(default='')
        # Crawl date, YYYY-MM-DD.
        item['crawl_time'] = datetime.datetime.now().strftime('%Y-%m-%d')
        # Spider name, for provenance.
        item['spider'] = self.name

        yield item