# -*- coding: utf-8 -*-
import scrapy
from .. import items
from .. import get_num  # helper library for extracting numbers/dates from text
import datetime
import re

class BossSpider(scrapy.Spider):
    """Crawl Beijing job postings from zhipin.com (BOSS直聘).

    ``parse`` walks the paginated listing pages and follows every job-detail
    link; ``parse_info`` scrapes one detail page into an ``items.BossItem``.
    """

    name = 'boss'
    allowed_domains = ['zhipin.com']
    # c101010100 is the city code for Beijing; sort=2 orders by latest.
    start_urls = ['https://www.zhipin.com/c101010100/?page=1&sort=2/']

    custom_settings = {
        # 'DOWNLOAD_DELAY': 8,
        'CONCURRENT_REQUESTS': 10,
        'DOWNLOADER_MIDDLEWARES': {
            # Lower numbers run earlier on requests, later on responses.
            'cprojectboss.mymiddlewares.RandomUserAgent': 998,
            'cprojectboss.mymiddlewares.RandomProxyMysql': 999,
        },
        # Very high retry budget: the rotating proxies fail often, so a
        # request may need many attempts before one succeeds.
        'RETRY_TIMES': 1000,
        'DOWNLOAD_TIMEOUT': 5,  # give up on a request after 5 seconds
    }

    def parse(self, response):
        """Parse a listing page: follow each job link, then the next page."""
        for infourl in response.xpath('//div[@class="info-primary"]/h3/a'):
            yield response.follow(infourl, self.parse_info)
        # BUG FIX: the original did extract()[0] before testing for emptiness,
        # so the last page (which has no "next" link) raised IndexError and
        # the guard below was unreachable. extract_first() returns None
        # instead, letting the spider terminate cleanly.
        next_page = response.xpath('//a[@class="next"]/@href').extract_first()
        if next_page:
            next_page_url = 'https://www.zhipin.com' + next_page
            yield response.follow(next_page_url, callback=self.parse)

    def parse_info(self, response):
        """Scrape one job-detail page into a BossItem and yield it."""
        item = items.BossItem()
        # Page URL
        url = response.url
        data = response.xpath('//div[@class="job-primary detail-box"]//div[@class="info-primary"]')
        # Job title ("positon" is a pre-existing typo in the item's field
        # name; the key below must keep matching items.BossItem)
        positon = data.xpath('//div[@class="name"]/h1/text()').extract()[0]
        # Salary range (e.g. "15k-25k")
        salary = data.xpath('//span[@class="badge"]/text()').extract()[0]

        # Raw HTML of the info block; city / experience / degree are
        # extracted from it with regexes because they share one <p> tag.
        data0 = data.xpath('//div[@class="info-primary"]').extract()[0]

        # City ("城市：" = "City:")
        location_pat = re.compile(r'>城市：(.+?)<')
        location = location_pat.findall(data0)[0]

        # Required work experience ("经验：" = "Experience:")
        years_pat = re.compile(r'经验：(.+?)<')
        years = years_pat.findall(data0)[0]

        # Required education ("学历：" = "Degree:")
        degree_pat = re.compile(r'学历：(.+?)</p>')
        degree = degree_pat.findall(data0)[0]

        # Publish date, e.g. <span class="time">发布于2018-06-29 19:12</span>.
        # NOTE: strip('发布于') removes that *set of characters* from both
        # ends, which happens to work for the "发布于" prefix here.
        date_pub = data.xpath('//span[@class="time"]/text()').extract()[0].strip('发布于').split(' ')[0]
        date_pub = get_num.get_time(date_pub)

        # Full job description, with newlines and spaces squeezed out
        jobdesc_data = response.xpath(r'//div[@class="detail-content"]')
        jobdesc = jobdesc_data.xpath('string(.)').extract()[0].replace('\n', '').replace(' ', '')

        # Work address
        jobaddr = response.xpath(r'//div[@class="location-address"]/text()').extract()[0].replace(' ', '')

        # Company name
        company = response.xpath(r'//div[@class="job-sec"]//div[@class="name"]/text()').extract()[0]

        # Date this record was crawled (YYYY-MM-DD)
        crawl_time = datetime.datetime.now().strftime('%Y-%m-%d')

        # Spider name, stored alongside the record for provenance
        spider = self.name

        item['url'] = url
        item['positon'] = positon
        item['salary'] = salary
        item['location'] = location
        item['years'] = years
        item['degree'] = degree
        item['date_pub'] = date_pub
        item['jobdesc'] = jobdesc
        item['jobaddr'] = jobaddr
        item['company'] = company
        item['crawl_time'] = crawl_time
        item['spider'] = spider

        yield item



