import os
import time

import scrapy
import json
import logging
import random
from bosszp.items import BosszpItem


class BossSpider(scrapy.Spider):
    """Interactive spider for zhipin.com (BOSS直聘) job listings.

    Flow: fetch the hot-city list, ask the operator on the console which
    city to crawl, then page through that city's job-search API, rotating
    cookies and user agents on every request.
    """
    name = 'boss'
    allowed_domains = ['zhipin.com']
    start_urls = ['https://www.zhipin.com/wapi/zpCommon/data/cityGroup.json']
    city_url = 'https://www.zhipin.com/wapi/zpgeek/search/joblist.json?scene=1&query=&city={}&experience=&degree=&industry=&scale=&stage=&position=&salary=&multiBusinessDistrict=&page={}&pageSize=30'
    # Cookie pool rotated between requests. Recommended size: pages/2 + 1,
    # and at least 3.
    # NOTE: the original list was missing the commas between entries, so
    # Python's implicit string concatenation silently merged three cookies
    # into one invalid value; the commas below restore four distinct cookies.
    cookies = [
        '__zp_stoken__=297ceYQE6J2BmGGtsOwFmfgZ7Bz9hSRZrGhwPez1XHGQGeEI3bTt%2BYQtbVmlBDkA6JjtlZwk9IDADNXcfPRZzWCR0fwceXA5NR1M%2FUBkZeBxjXCEfGDxEFXMkfFlBbDBxZAJXBntDC01LBgc%3D',
        '__zp_stoken__=297ceYQE6J2BmGEpSKFB6fgZ7Bz9gSXgqWV0Pez1XHBpxDWcLBTt%2BYQtbYWQ8ZwA6JjtlZwk9IDB0SHsfKQQMS0N4PQwsVBNJO1khUBkZeBxjXCFfcUFJInMkfFlBBDBxZAJXBntDC01LBgc%3D',
        '__zp_stoken__=297ceYQE6J2BmGFg6YVBIfgZ7Bz97XXdzagsPez1XHAJ4f3ImKzt%2BYQtbYkYoQH06JjtlZwk9IDAcNgMfJxJIY1RaP0Y8WhBWHFkiUBkZeBxjXCEiVlVrIXMkfFlBKjBxZAJXBntDC01LBgc%3D',
        '__zp_stoken__=9d52eEDo6NiVVH1RqWUB3Gjp8dWxgXjxZAkY8JVIRCwgkOjoXfilpJVccPTEFRml4AntSFk0vKDBEA0IAB1FZCxdhUG5QWj1lLR0jTzJoDS42HE9cH0tVR24iAQdgMCtqQEJgdz9bXU1aTT4%3D',
    ]
    # User-agent pool; one entry is picked at random per request.
    user_agents = [
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.1 Safari/605.1.15',
        'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0',
        'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
        'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)',
        'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE)',
    ]
    page_no = 1  # current page of the job-list API
    city_code = None  # cached city code chosen by the operator (set on first lookup)

    def random_header(self):
        """Build request headers with a randomly chosen cookie and user agent.

        :return: dict of headers for the next request
        """
        return {
            'Referer': 'https://www.zhipin.com/',
            'cookie': random.choice(self.cookies),
            'user-agent': random.choice(self.user_agents),
        }

    def get_city_code(self, response):
        """Resolve the hot-city code to crawl.

        On the first call this loads the hot-city list (from the local
        ``city_group.json`` cache when present, otherwise from *response*,
        which is then cached to disk), prints the choices and asks the
        operator to pick one. The chosen code is memoized on the spider so
        later calls made while paginating do not prompt again.

        :param response: response carrying the cityGroup JSON (only read
            when no local cache file exists)
        :return: the city code of the selected hot city
        """
        if self.city_code is not None:
            # Already chosen — don't re-prompt on every page.
            return self.city_code
        if os.path.isfile('city_group.json'):
            # Prefer the on-disk cache to avoid re-downloading the list.
            with open('city_group.json', 'rb') as f:
                city_group = json.load(f)
        else:
            city_group = json.loads(response.body.decode())
            with open('city_group.json', 'wb') as f:
                f.write(json.dumps(city_group).encode('utf-8'))
        hot_city_list = city_group['zpData']['hotCityList']
        # Show a 1-based menu of hot cities, e.g. [{1: '北京'}, {2: '上海'}, ...]
        hot_city_names = [{index + 1: item['name']} for index, item in enumerate(hot_city_list)]
        print("--->", hot_city_names)
        # Read the chosen number from the console (1-based index).
        city_no = int(input('请从上述城市列表中，选择编号开始爬取：'))
        self.city_code = hot_city_list[city_no - 1]['code']
        return self.city_code

    def parse(self, response):
        """Entry point: turn the city-list response into the first job-list request.

        :param response: response for ``start_urls[0]`` (hot-city JSON)
        """
        logging.info("<<<<<<<<<<<<<正在爬取第_{}_页岗位数据>>>>>>>>>>>>>".format(self.page_no))
        # The cookie already travels in the headers. The old code also passed
        # ``cookies=self.cookies`` — a list of raw cookie *strings*, which is
        # not what scrapy.Request's ``cookies=`` expects (dicts) — so that
        # argument has been dropped, matching the follow-up requests.
        yield scrapy.Request(
            url=self.city_url.format(self.get_city_code(response), self.page_no),
            headers=self.random_header(),
            callback=self.parse_city,
        )

    def parse_city(self, response):
        """Parse one page of job listings and schedule the next page.

        :param response: job-list API response (JSON body)
        :return: yields one ``BosszpItem`` per job, then the next-page request
        """
        if response.status != 200:
            logging.warning("<<<<<<<<<<<<<获取城市招聘信息失败，ip已被封禁。请稍后重试>>>>>>>>>>>>>")
            return
        print(response.text)
        data = json.loads(response.text)

        job_list = data["zpData"]["jobList"]
        for li in job_list:
            # Welfare entries are joined as "a,b,c," — the trailing comma is
            # kept for backward compatibility with downstream consumers.
            welfare = li["welfareList"]
            job_benefits = '' if not welfare else ''.join(w.strip() + ',' for w in welfare)
            item = BosszpItem(
                job_name=li["jobName"],
                city=li["cityName"],
                district=li["areaDistrict"],
                street=li["businessDistrict"],
                job_salary=li["salaryDesc"],
                com_name=li["brandName"],
                com_type=li["brandIndustry"],
                com_size=li["brandScaleName"],
                finance_stage=li["brandStageName"],
                work_year=li["jobExperience"],
                education=li["jobDegree"],
                job_benefits=job_benefits,
            )
            yield item
        if not job_list:
            # No more results — stop instead of paginating forever.
            return
        self.page_no += 1
        next_url = self.city_url.format(self.get_city_code(response), self.page_no)
        logging.info("<<<<<<<<<<<<<正在爬取第_{}_页岗位数据>>>>>>>>>>>>>".format(self.page_no))
        yield scrapy.Request(url=next_url, headers=self.random_header(), callback=self.parse_city)
