import re

import requests
import scrapy
from mySpider.items import MyspiderItem
from mySpider.settings import ipPool


class BossdataSpider(scrapy.Spider):
    """Crawl job postings from zhipin.com (BOSS 直聘).

    ``parse`` collects the job-category names from the landing page and fans
    out one request per (category, page) pair; ``parse_detail`` scrapes each
    listing page into ``MyspiderItem`` objects.
    """

    name = "bossData"
    allowed_domains = ["www.zhipin.com"]
    start_urls = ["https://www.zhipin.com/"]

    def parse(self, response):
        """Extract job-category names and schedule the listing pages.

        Yields one request per category for pages 1-15; the category name
        travels in ``meta['type']`` so ``parse_detail`` can tag items.
        """
        job_menu = response.xpath('//*[@id="main"]/div/div[1]/div/div[1]/dl[1]/div/ul/li')
        spider_url = 'https://www.zhipin.com/web/geek/job?query=%s&city=100010000&page=%s'
        # Flatten every menu entry's <a> texts into one flat category list.
        # (renamed from `type`, which shadowed the builtin)
        type_list = [name
                     for entry in job_menu
                     for name in entry.xpath('./div/a/text()').extract()]

        for job_type in type_list:
            for page in range(1, 16):
                url = spider_url % (job_type, page)
                yield scrapy.Request(url, callback=self.parse_detail, meta={'type': job_type})

    @staticmethod
    def _parse_salary(text):
        """Parse a salary string into ``(min_salary, max_salary, salary_month)``.

        Formats handled (observed on the listing pages):
          * full-time:  ``'15-30K'`` or ``'15-30K·13薪'`` -> yuan/month,
            optional year-end bonus months after the ``·``
          * internship: ``'150-200元/天'`` or ``'3000-5000元/月'``

        Raises ValueError if the numeric part is not an integer range.
        """
        salary_month = '12薪'
        if 'K' in text:  # full-time position
            parts = text.split('·')  # optional year-end bonus suffix
            bounds = [int(x) * 1000 for x in parts[0].replace('K', '').split('-')]
            if len(parts) > 1:
                salary_month = parts[1]
        elif '元/天' in text:  # internship, paid per day
            bounds = [int(x) for x in text.replace('元/天', '').split('-')]
        else:  # internship, paid per month
            bounds = [int(x) for x in text.replace('元/月', '').split('-')]
        # A single figure (e.g. '20K') yields one bound; use it for both ends
        # instead of crashing with IndexError on the max lookup.
        if len(bounds) == 1:
            bounds.append(bounds[0])
        return bounds[0], bounds[1], salary_month

    def parse_detail(self, response):
        """Parse one listing page and yield a ``MyspiderItem`` per job card."""
        job_list = response.xpath('//ul[@class="job-list-box"]/li')
        for job in job_list:
            item = MyspiderItem()
            item['type'] = response.meta['type']
            item['title'] = job.xpath('.//a[@class="job-card-left"]/div[contains(@class,"job-title")]/span[@class="job-name"]/text()').extract_first()
            item['companyTitle'] = job.xpath('.//div[@class="job-card-right"]/div[@class="company-info"]/h3/a/text()').extract_first()

            salaries = job.xpath('.//a[@class="job-card-left"]/div[contains(@class,"job-info")]/span[@class="salary"]/text()').extract_first()
            if not salaries:
                # Defensive: skip cards with no salary text (layout change /
                # anti-bot page) instead of crashing on `'K' in None`.
                continue
            min_salary, max_salary, salary_month = self._parse_salary(salaries)
            item['minSalary'] = min_salary
            item['maxSalary'] = max_salary

            tag_list = job.xpath('.//a[@class="job-card-left"]/div[contains(@class,"job-info")]/ul/li')
            if len(tag_list) == 2:  # full-time posting: [experience, education]
                item['educational'] = tag_list[1].xpath("./text()").extract_first()
                item['workExperience'] = tag_list[0].xpath("./text()").extract_first()
            else:  # internship posting: extra leading tag shifts the indexes
                item['educational'] = tag_list[2].xpath("./text()").extract_first()
                item['workExperience'] = tag_list[1].xpath("./text()").extract_first()

            item['detailUrl'] = job.xpath('./div[1]/a/@href').extract_first()

            # Company tags (industry / funding / headcount); `or ''` guards
            # against extract_first() returning None inside the join.
            total_tags = job.xpath('.//div[@class="job-card-right"]/div[@class="company-info"]/ul[@class="company-tag-list"]/li')
            joined_tags = ' / '.join(t.xpath("./text()").extract_first() or '' for t in total_tags)
            # Digits from the tags — presumably the headcount range; kept as
            # a list of strings, matching the original behavior.
            item['companyPeople'] = re.findall(r'\d+', joined_tags)

            work_tags = job.xpath('./div[contains(@class,"job-card-footer")]/ul[@class="tag-list"]/li')
            item['workTag'] = " / ".join(t.xpath("./text()").extract_first() or '' for t in work_tags)

            item['welfare'] = job.xpath('./div[contains(@class,"job-card-footer")]/div[@class="info-desc"]/text()').extract_first()
            # BUG FIX: extract_first() returns None (not '') for a missing
            # node, so the old `== ''` check never fired and the `+=` below
            # raised TypeError on None.
            if not item['welfare']:
                item['welfare'] = '福利少'
            if salary_month != '12薪':
                item['welfare'] += '，' + salary_month

            item['imgSrc'] = job.xpath('.//div[@class="job-card-right"]/div[@class="company-logo"]/a/img/@src').extract_first()
            address = job.xpath('.//a[@class="job-card-left"]/div[contains(@class,"job-title")]/span[@class="job-area-wrapper"]/span[@class="job-area"]/text()').extract_first()
            # Address looks like '北京·朝阳区·…'; keep only the city part.
            item['city'] = address.split('·')[0] if address else ''
            yield item

