import scrapy
import json
import logging
import random
from bosszp.items import BosszpItem


class BossSpider(scrapy.Spider):
    """Spider for job listings on zhipin.com (Boss直聘).

    Flow: fetch the hot-city list JSON, prompt the user to pick a city,
    then page through that city's job listings, yielding one
    ``BosszpItem`` per posting. Cookies and user-agents are rotated on
    every request to reduce the chance of an IP ban.
    """

    name = 'boss'
    allowed_domains = ['zhipin.com']
    start_urls = ['https://www.zhipin.com/wapi/zpCommon/data/cityGroup.json']
    # Pool of cookies rotated per request. Recommended count: pages / 2 + 1,
    # and at least 3 (original author's guidance).
    cookies = [
        '__zp_stoken__=3878eZ3QFHW1CPzFhZj0wAHwGJXUMHW1jFTlIUVl9cWkbfWw%2BIWwFNFpuOXZFcWUdLVcSYXoEfiADciQmNBFVGEopFzcTSidtPFE0aQEuLBIOMH8FRm0VTXxPHmxDAQl%2Fb24gAGAOfFxLPCE%3D',
        '__zp_stoken__=3878eZ3QFHW1CPzlRJlRVAHwGJXVsP2RJfmRIUVl9cR0AFHB1IWwFNFpuIHxjaFkhLVcSYXoEcT50DC8mOkZQY2onERtgCFBnJ2w%2FaQEuLBIOMEM5X0sfVHxPHmxDAQl%2Fb24gAGAOfFxLPCE%3D',
        '__zp_stoken__=3878eZ3QFHW1CPzQKcW8tAHwGJXV5GQ5oEnNIUVl9cWMaCXULIWwFNFpuOwsiVEMwLVcSYXoEcT50AFEmJTpPS09DQwobUHRGH2UbaQEuLBIOMFIjYwpoT3xPHmxDAQl%2Fb24gAGAOfFxLPCE%3D',
        '__zp_stoken__=3878eZ3QFHW1CPzN2WHtaAHwGJXU0DGx%2FQWFIUVl9cSNHY28gIWwFNFpuSgolankYLVcSYXoEcSR7D3MmN3JxalJHcCEVaH9IP20caQEuLBIOMHoZXQ1pPnxPHmxDAQl%2Fb24gAGAOfFxLPCE%3D',
        '__zp_stoken__=3878eZ3QFHW1CP1ZZJT1xAHwGJXUBGEtPQWRIUVl9cT9QHzMcIWwFNFpuOAlLWENlLVcSYXoEcSRVeSQmMA1KRENdTQ4cdVRjHQM5aQEuLBIOMAcjb2NqTHxPHmxDAQl%2Fb24gAGAOfFxLPCE%3D',
        '__zp_stoken__=3878eZ3QFHW1CPylSbyQmAHwGJXUBKhlgBDhIUVl9cXpAYXRtIWwFNFpuIH9fL0B4LVcSYXoEcSQJciAmNGR1QTA1KEwUX2pcPV4GaQEuLBIOMBogGHccVHxPHmxDAQl%2Fb24gAGAOfFxLPCE%3D'
    ]
    # Pool of user-agent strings rotated per request.
    user_agents = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36 Edg/110.0.1587.50",
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.72 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:38.0) Gecko/20100101 Firefox/38.0",
        "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; .NET4.0C; .NET4.0E; .NET CLR 2.0.50727; .NET CLR 3.0.30729; .NET CLR 3.5.30729; InfoPath.3; rv:11.0) like Gecko",
        "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
        "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)"
    ]
    page_no = 1  # current results page, 1-based

    def random_header(self):
        """Build request headers with a randomly chosen cookie and user-agent.

        :return: dict with ``Referer``, ``cookie`` and ``user-agent`` keys
        """
        return {
            'Referer': 'https://www.zhipin.com/c101020100/?ka=sel-city-101020100',
            'cookie': random.choice(self.cookies),
            'user-agent': random.choice(self.user_agents),
        }

    @staticmethod
    def _parse_com_size(size_text):
        """Convert a company-size label into a ``[low, high]`` pair of ints.

        ``'100-499人'`` -> ``[100, 499]``; ``'10000人以上'`` -> ``[0, 10000]``.
        Returns ``None`` for missing input, and the raw text for labels
        that don't match the ``low-high人`` pattern (e.g. ``'少于15人'``),
        instead of crashing on ``int()`` as the original code did.
        """
        if size_text is None:
            return None
        if size_text == '10000人以上':
            return [0, 10000]
        try:
            return [int(part) for part in size_text.replace('人', '').split('-')]
        except ValueError:
            return size_text

    def parse(self, response):
        """Parse the hot-city JSON and let the user pick a city to crawl.

        :param response: response carrying the city-group JSON payload
        :return: yields a Request for the chosen city's first listing page
        """
        city_group = json.loads(response.body.decode())
        hot_city_list = city_group['zpData']['hotCityList']
        # Number the city names 1..N for the interactive prompt.
        hot_city_names = [{index + 1: item['name']} for index, item in enumerate(hot_city_list)]
        print("--->", hot_city_names)
        # NOTE(review): blocking input() inside a spider only works when
        # scrapy is run interactively from a terminal.
        city_no = int(input('请从上述城市列表中，选择编号开始爬取：'))
        # Resolve the chosen index to the site's numeric city code.
        city_code = hot_city_list[city_no - 1]['code']
        city_url = 'https://www.zhipin.com/job_detail/?query=&city={}&industry=&position='.format(city_code)
        logging.info("<<<<<<<<<<<<<正在爬取第_{}_页岗位数据>>>>>>>>>>>>>".format(self.page_no))
        yield scrapy.Request(url=city_url, headers=self.random_header(), callback=self.parse_city)

    def parse_city(self, response):
        """Parse one page of job listings and schedule the next page.

        :param response: HTML response for one listing page
        :return: yields one BosszpItem per job, then a Request for the next page
        """
        if response.status != 200:
            logging.warning("<<<<<<<<<<<<<获取城市招聘信息失败，ip已被封禁。请稍后重试>>>>>>>>>>>>>")
            return
        li_elements = response.xpath('//div[@class="search-job-result"]/ul/li')
        # Class of the last pager link; "disabled" means there is no next page.
        last_page_class = response.xpath('//div[@class="options-pages"]/a[last()]/@class').get()

        for li in li_elements:
            job_name = li.xpath('./div[1]/a/div[@class="job-title clearfix"]//span[@class="job-name"]/text()').get()
            job_area = li.xpath('./div[1]/a/div[@class="job-title clearfix"]//span[@class="job-area"]/text()').get()
            job_salary = li.xpath('./div[1]/a/div[@class="job-info clearfix"]/span[@class="salary"]/text()').get()
            com_name = li.xpath('./div[1]/div[@class="job-card-right"]//div[@class="company-text"]/h3/a/text()').get()
            company_info = li.xpath(".//div[@class='job-card-right']/div[@class='company-info']/ul[@class='company-tag-list']/li")
            # BUG FIX: Scrapy selectors have no `.text` attribute — the original
            # `companyInfo[0].text` raised AttributeError. Extract text via XPath.
            tags = [tag.xpath('./text()').get() for tag in company_info]
            if len(tags) >= 3:
                # Full tag list: type / financing stage / size.
                com_type, finance_stage, size_text = tags[0], tags[1], tags[2]
            elif len(tags) == 2:
                # Financing stage missing: assume unfunded (original behavior).
                com_type, finance_stage, size_text = tags[0], '未融资', tags[1]
            else:
                # BUG FIX: the original indexed companyInfo unconditionally and
                # raised IndexError when fewer than 2 tags were present.
                com_type = tags[0] if tags else None
                finance_stage, size_text = '未融资', None
            com_size = self._parse_com_size(size_text)
            # BUG FIX: the original XPath used "u1" (digit one) instead of "ul",
            # so work_year/education were always None; also address the first
            # and second <li>, not text nodes of a single <li>.
            tag_list = './div[@class="job-card-body clearfix"]/a/div[@class="job-info clearfix"]/ul[@class="tag-list"]/li'
            work_year = li.xpath(tag_list + '[1]/text()').get()
            education = li.xpath(tag_list + '[2]/text()').get()
            job_benefits = li.xpath('./div[@class="job-card-footer clearfix"]/div[@class="info-desc"]/text()').get()
            print(job_name, job_area, job_salary, com_name, com_type, com_size, finance_stage, work_year, education,
                  job_benefits)
            item = BosszpItem(job_name=job_name, job_area=job_area, job_salary=job_salary, com_name=com_name,
                              com_type=com_type, com_size=com_size,
                              finance_stage=finance_stage, work_year=work_year, education=education,
                              job_benefits=job_benefits)
            yield item
        if last_page_class == "disabled":
            logging.info('<<<<<<<<<<<<<热门城市岗位数据已爬取结束>>>>>>>>>>>>>')
            logging.info("<<<<<<<<<<<<<一共爬取了_{}_页岗位数据>>>>>>>>>>>>>".format(self.page_no))
            return
        # BUG FIX: the original passed int('<xpath string>') — i.e. int() applied
        # to the XPath expression itself — which raises ValueError on every call.
        # Read the currently-selected page number and request the next one.
        current_page = response.xpath('//div[@class="options-pages"]/a[@class="selected"]/text()').get()
        # NOTE(review): assumes the site paginates via a ?page=N query string —
        # TODO confirm against the live pager links.
        next_url = response.urljoin('?page={}'.format(int(current_page) + 1))
        self.page_no += 1
        logging.info("<<<<<<<<<<<<<正在爬取第_{}_页岗位数据>>>>>>>>>>>>>".format(self.page_no))
        yield scrapy.Request(url=next_url, headers=self.random_header(), callback=self.parse_city)
