# -*- coding: utf-8 -*-

import scrapy
from ..items import BosszhipinItem


class BossSpider(scrapy.Spider):
    """Scrape the first ten listing pages of Boss Zhipin (Beijing, city code
    c101010100), follow every job posting to its detail page, and yield one
    :class:`BosszhipinItem` per posting.
    """

    name = 'boss'
    # Requirement 1: crawl the first ten listing pages.
    start_urls = [
        'https://www.zhipin.com/c101010100/?page=' + str(page) + '&ka=page-' + str(page)
        for page in range(1, 11)
    ]

    @staticmethod
    def _clean(text):
        """Normalize a block of extracted text: drop empty / newline-only
        fragments, join the rest with commas, and strip embedded newlines.
        """
        return ','.join([i for i in text.split(' ') if i != '' and i != '\n']).replace('\n', '')

    def parse(self, response):
        """Listing page: extract each posting's detail-page link and follow it.

        :param response: listing-page response for one ``start_urls`` entry.
        """
        # Requirement 2/3: XPath out every job link on the listing page.
        all_a = response.xpath("//*[@id='main']/div/div[2]/ul/li/div/div[1]/h3/a")
        for a in all_a:
            href = 'https://www.zhipin.com' + a.xpath("./@href")[0].extract()
            # Carry the absolute URL into the detail callback via meta.
            yield scrapy.Request(url=href, callback=self.parse_xq, meta={'href': href})

    def parse_xq(self, response):
        """Detail page: extract company, position, HR and address info and
        yield a populated :class:`BosszhipinItem`.

        :param response: detail-page response; ``response.meta['href']`` holds
            the page URL recorded by :meth:`parse`.
        """
        href = response.meta['href']
        # Requirement 4: company basics from the sidebar.
        xinxis = response.xpath("string(//div[@class='job-sider']/div[@class='sider-company'])")[0].extract()
        xinxi = self._clean(xinxis)
        # Requirement 5: position title and salary range.
        gangwei = response.xpath("//div[@class='info-primary']/div[@class='name']/h1/text()")[0].extract()
        xinzi = response.xpath("//div[@class='name']/span[@class='salary']/text()")[0].extract().strip()
        # Requirement 6: required experience and education.
        nianxian = response.xpath("//div[@class='info-primary']/p/text()")[1].extract()
        xueli = response.xpath("//div[@class='info-primary']/p/text()")[2].extract()
        # Requirement 7: HR name and last-online time.
        hr_name = response.xpath("//div[@class='detail-op']/h2[@class='name']/text()")[0].extract()
        hr_shijian = response.xpath("//div[@class='detail-op']/p[@class='gray']/text()")[1].extract()
        # Requirement 8: job duties and requirements.
        zhizeyaoqiu = response.xpath("string(//div[@class='job-sec'][1]/div[@class='text'])")[0].extract().strip()
        # Requirement 9: company introduction, business registration info, address.
        jieshao = response.xpath("string(//div[@class='job-sec company-info']/div[@class='text'])")[0].extract().strip()
        # The page has either 3 or 4 'job-sec' divs; the business-registration
        # block is always the second-to-last one and the address the last one.
        # Default both to '' so the item is still complete (the original code
        # raised NameError on any other layout).
        gongshang = ''
        dizhi = ''
        div_count = len(response.xpath("//div[@class='detail-content']/div[@class='job-sec']"))
        if div_count in (3, 4):
            gongshangs = response.xpath(
                "string(//div[@class='detail-content']/div[@class='job-sec'][%d])" % (div_count - 1)
            )[0].extract()
            gongshang = self._clean(gongshangs)
            dizhis = response.xpath(
                "string(//div[@class='detail-content']/div[@class='job-sec'][%d])" % div_count
            )[0].extract()
            dizhi = self._clean(dizhis)
        # Requirement 10: hand the item to the pipeline (MongoDB storage).
        item = BosszhipinItem()
        item['gangwei'] = gangwei
        item['xinzi'] = xinzi
        item['nianxian'] = nianxian
        item['xueli'] = xueli
        item['href'] = href
        item['hr_name'] = hr_name
        item['hr_shijian'] = hr_shijian
        item['zhizeyaoqiu'] = zhizeyaoqiu
        item['xinxi'] = xinxi
        item['jieshao'] = jieshao
        item['gongshang'] = gongshang
        item['dizhi'] = dizhi
        yield item
