# -*- coding: utf-8 -*-
import scrapy
try:
    # Python 2: the `urlparse` module exposes a function of the same name.
    import urlparse
    urlparse = urlparse.urlparse
except ImportError:
    # BUGFIX: was a bare `except:` which would also mask unrelated errors
    # (KeyboardInterrupt, SystemExit, typos inside the try body).
    from urllib.parse import urlparse  # Python 3 location
from bs4 import BeautifulSoup


class BossZhipinSpider(scrapy.Spider):
    """Spider that scrapes job listings from zhipin.com (BOSS直聘).

    Yields one dict per job posting with title, salary, company and
    location/seniority/education fields, then follows the pagination link.
    """
    # Spider name used by `scrapy crawl boss_zhipin`.
    name = 'boss_zhipin'
    # Allowed crawl domains (left disabled so redirects are not filtered).
    # allowed_domains = ['zhipin.com']
    # Entry URL: city c101020100, job category p100109, first page.
    start_urls = ['https://www.zhipin.com/c101020100-p100109/?page=1&ka=page-1']

    def start_requests(self):
        """Issue the first request with browser-like headers so the site
        does not immediately reject the crawler."""
        yield scrapy.Request(url=self.start_urls[0],
                             encoding='utf-8',
                             headers=self.head(self.start_urls[0]),
                             callback=self.parse,
                             dont_filter=True)

    def parse(self, response):
        """Parse one listing page; yield a dict per job, then request the
        next page while one exists."""
        # Each <li> under the job list is one posting.
        # (Removed an unused BeautifulSoup parse of response.body — all
        # extraction below uses Scrapy selectors.)
        node_list = response.xpath("//div[@id='main']/div/div[@class='job-list']/ul/li")
        for node in node_list:
            item = {}
            # BUGFIX: originals called node.xpth(...) / node.xapth(...)
            # (typos) which raised AttributeError on every item.
            item["job_title"] = node.xpath(".//div[@class='job-title']/text()").extract_first()
            item["compensation"] = node.xpath(".//span[@class='red']/text()").extract_first()
            item["company"] = node.xpath(".//div[@class='company-text']/h3/a/text()").extract_first()
            company_info = node.xpath(".//div/div[@class='company-text']/p/text()").extract()
            temp = node.xpath(".//div/div[@class='info-primary']/p/text()").extract()
            # Pad so missing fields become empty strings instead of IndexError.
            temp += [""] * (3 - len(temp))
            item["address"] = temp[0]
            item["seniority"] = temp[1]
            item["education"] = temp[2]
            # company_info is [type, financing, headcount]; the financing
            # entry is sometimes absent.
            if len(company_info) >= 3:
                item["company_type"] = company_info[0]
                item["company_finance"] = company_info[1]
                item["company_quorum"] = company_info[2]
            elif company_info:
                item["company_type"] = company_info[0]
                item["company_finance"] = ""
                item["company_quorum"] = company_info[-1]
            else:
                # BUGFIX: original indexed company_info[0] unconditionally
                # in the short branch, which IndexErrored on an empty list.
                item["company_type"] = ""
                item["company_finance"] = ""
                item["company_quorum"] = ""
            yield item

        # BUGFIX: .extract() returns a LIST; the original compared that
        # list to the string 'javascript:;' (always unequal) and then
        # concatenated it to a str (TypeError). Take the last pager link —
        # the "next" anchor — and compare/join the string itself.
        page_links = response.xpath("//div[@class='page']/a/@href").extract()
        next_page = page_links[-1] if page_links else None
        if next_page and next_page != 'javascript:;':
            base_url = "https://www.zhipin.com"
            url = base_url + next_page
            print(url)
            yield scrapy.Request(url=url,
                                 headers=self.head(url),
                                 callback=self.parse)

    def head(self, u, dict_data=None):
        """Build browser-like request headers for URL *u*.

        :param u: target URL; its netloc becomes the Host header.
        :param dict_data: optional extra headers to merge in; keys are
            Title-Cased to match HTTP header convention.
        :return: dict of header name -> value.
        """
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            # Host must match the request target or the site may 400/redirect.
            'Host': urlparse(u).netloc,
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'
        }
        if isinstance(dict_data, dict):
            # Title-case every word of each key (e.g. 'content-type' ->
            # 'Content-Type') before merging over the defaults.
            headers.update({k.title(): v for k, v in dict_data.items()})
        return headers

if __name__ == '__main__':
    # Allow running this spider directly (python thisfile.py) instead of
    # via `scrapy crawl`: find the first class defined in this file and
    # hand it to a CrawlerProcess.
    import os
    import re
    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    file_name = os.path.abspath(__file__)
    # BUGFIX: the original opened the file without ever closing it (and
    # shadowed the `file` builtin); use a context manager.
    with open(file_name, 'r', encoding='utf-8') as fh:
        source_lines = fh.readlines()

    class_name = None
    for line in source_lines:
        if line.startswith('class'):
            # BUGFIX: guard the findall result — the original indexed [0]
            # unconditionally and would IndexError on a non-matching line.
            match = re.findall(r'\s+(\w+)\(', line)
            if match:
                class_name = match[0]
            break

    if class_name:
        # BUGFIX: look the class up in globals() instead of eval()-ing
        # text read back from disk.
        spider_cls = globals()[class_name]
        project_settings = get_project_settings()
        process = CrawlerProcess(settings=project_settings)
        process.crawl(spider_cls, name=spider_cls.name)
        process.start()
    else:
        print('获取类名失败！')
