# -*- coding: utf-8 -*-
# Crawl listing/detail pages using the CrawlSpider rule template
import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
from Recruit.items import LiePingItem
# scrapy.Spider
class LiepingSpider(CrawlSpider):
    """Crawl liepin.com and scrape job-posting detail pages into LiePingItem.

    Crawl strategy (rule-driven):
      * search/category listing pages are followed to discover links;
      * job detail pages (``/job/<id>.shtml``) are parsed by ``parse_detail``.
    """
    name = 'lieping'
    allowed_domains = ['liepin.com']
    start_urls = ['https://www.liepin.com/']

    rules = (
        # Job detail pages: scrape fields, do not follow further links from them.
        # Dots are escaped so '.' matches only a literal dot (the original
        # unescaped pattern matched any character in those positions).
        Rule(LinkExtractor(allow=r'https://www\.liepin\.com/job/\d+\.shtml'),
             callback='parse_detail', follow=False),
        # Search/category listing pages: follow to reach the detail links above.
        Rule(LinkExtractor(allow=r'https://www\.liepin\.com/zhaopin/\?key(.*?)search_industry'),
             follow=True),
    )

    def parse_detail(self, response):
        """Extract one job posting from a detail page and yield a LiePingItem.

        Fields missing from the page come back as ``None`` (``extract_first``).
        A page that fails to parse is logged and skipped so one bad page does
        not abort the crawl (the original bare ``except: pass`` hid all errors,
        including ``name.split`` crashing on a missing ``<title>``).
        """
        try:
            item = LiePingItem()
            item['title'] = response.xpath('//h1/text()').extract_first()
            item['company'] = response.xpath('//h3/a/@title').extract_first()
            item['time'] = response.xpath(
                '//p[@class="basic-infor"]/time/@title').extract_first()
            item['addr'] = response.xpath(
                '//p[@class="basic-infor"]/span/a/text()').extract_first()
            item['salary'] = response.xpath(
                '//p[@class="job-item-title"]/text()').extract_first()
            item['experience'] = response.xpath(
                '//div[@class="job-qualifications"]/span[1]/text()').extract_first()
            item['Education'] = response.xpath(
                '//div[@class="job-qualifications"]/span[2]/text()').extract_first()
            # Description is spread over many text nodes; strip each fragment
            # and concatenate into one string.
            desc_parts = response.xpath(
                '//div[@class="content content-word"]//text()').extract()
            item['new_desc'] = ''.join(part.strip() for part in desc_parts)
            # Site/page name: last '-'-separated segment of the <title> tag;
            # guard against a page with no <title> (extract_first -> None).
            page_title = response.xpath('//head//title/text()').extract_first()
            item['name'] = page_title.split('-')[-1] if page_title else None
            item['this_url'] = response.url
            yield item
        except Exception:
            # Log the failure with the offending URL instead of silently
            # swallowing it, then skip this page.
            self.logger.exception('Failed to parse job detail page: %s',
                                  response.url)








