# -*- coding: utf-8 -*-

import scrapy
from scrapy.http import Request
from scrapy.selector import Selector
from bs4 import BeautifulSoup
import datetime
from Find_job.items import FindJobItem
import re

class Myspider(scrapy.Spider):
    """Crawl 51job search-result pages for purchasing (采购) jobs in
    area 040000 and scrape each posting's detail page into a
    ``FindJobItem``.
    """

    name = "Find_job"

    # Matches the numeric job id immediately before ".html" in a detail URL,
    # e.g. https://jobs.51job.com/shenzhen-baq/99330771.html?s=01&t=0
    # -> "99330771".  Compiled once instead of once per result row.
    _JOB_ID_RE = re.compile(r'(\d+)\.html')

    # Search-result URL template.  "%25E9%2587%2587%25E8%25B4%25AD" is the
    # doubly URL-encoded keyword 采购 ("purchasing"); {page} is the
    # zero-based result-page index.
    _SEARCH_URL = (
        'https://search.51job.com/list/040000,000000,0000,00,0,99,'
        '%25E9%2587%2587%25E8%25B4%25AD,2,{page}.html'
        '?lang=c&stype=1&postchannel=0000&workyear=99&cotype=99'
        '&degreefrom=99&jobterm=99&companysize=99&lonlat=0%2C0&radius=-1'
        '&ord_field=0&confirmdate=9&fromType=5&dibiaoid=0&address=&line='
        '&specialarea=00&from=&welfare='
    )

    def start_requests(self):
        """Yield one request per search-result page (pages 0..151)."""
        for page in range(152):
            yield Request(self._SEARCH_URL.format(page=page))

    def parse(self, response):
        """Parse one search-result page.

        Extract each job row's detail-page URL and schedule a request to
        :meth:`info_page`, passing the numeric job id via ``meta``.
        """
        selector = Selector(text=response.text)
        # Each result row (including some non-job header rows) is a
        # <div class="el">; header rows carry no detail link and are skipped.
        for row_html in selector.css('div.el').extract():
            hrefs = Selector(text=row_html).css('p.t1 span a::attr(href)').extract()
            if not hrefs:
                continue
            url = hrefs[0]
            match = self._JOB_ID_RE.search(url)
            if match is None:
                # Unexpected URL shape — skip rather than crash the crawl.
                continue
            yield Request(url, callback=self.info_page,
                          meta={'id': match.group(1)})

    def info_page(self, response):
        """Parse a job detail page into a ``FindJobItem``.

        The company-info block (div[4]/div[1]/div[2]) interleaves label and
        value text nodes, hence the odd indices: 1 = company nature
        (xingzhi), 3 = company size (guimo), 5 = industry (hangye).
        Missing fields yield ``None`` instead of raising IndexError, so one
        sparse or redesigned page cannot kill the spider.
        """
        def _at(seq, idx):
            # Bounds-checked index access for scraped text-node lists.
            return seq[idx] if len(seq) > idx else None

        item = FindJobItem()
        item['id'] = response.meta['id']

        page = Selector(text=response.text)
        item['zhiwei'] = page.xpath(
            '/html/body/div[3]/div[2]/div[2]/div/div[1]/h1/@title').extract()
        item['gongsi'] = page.xpath(
            '/html/body/div[3]/div[2]/div[2]/div/div[1]/p[1]/a[1]/@title').extract()
        item['qian'] = page.xpath(
            '/html/body/div[3]/div[2]/div[2]/div/div[1]/strong/text()').extract()
        item['miaoshu'] = page.xpath(
            '/html/body/div[3]/div[2]/div[3]/div[1]/div').extract()

        dizhi = page.xpath(
            '/html/body/div[3]/div[2]/div[3]/div[2]/div/p/text()').extract()
        info = page.xpath(
            '/html/body/div[3]/div[2]/div[4]/div[1]/div[2]//text()').extract()

        item['dizhi'] = _at(dizhi, 1)
        item['xingzhi'] = _at(info, 1)
        item['guimo'] = _at(info, 3)
        item['hangye'] = _at(info, 5)

        yield item
