import re
import sys

from scipy.constants import value
from selenium import webdriver
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import time
import os
import csv
import json
import pandas as pd


class spider(object):
    """Selenium-based scraper for BOSS直聘 (zhipin.com) job listings.

    Crawls search-result pages for one job keyword and appends one
    13-column row per job card to ./jobData.csv.
    """

    def __init__(self, type, page):
        # NOTE: the parameter name `type` shadows the builtin but is kept
        # unchanged for backward compatibility with existing callers.
        self.type = type  # job keyword to search for
        self.page = page  # page number to start crawling from
        # %s placeholders are (keyword, page); city=100010000 = nationwide.
        self.spiderUrl = 'https://www.zhipin.com/web/geek/job?query=%s&city=100010000&page=%s'

    def startBrowser(self):
        """Create and return a new Chrome WebDriver instance."""
        service = Service('./chromedriver.exe')
        options = webdriver.ChromeOptions()
        # Attach to an already-running browser instead of opening a new one;
        # effective against zhipin's anti-bot checks (enable when needed):
        # options.add_experimental_option('debuggerAddress', 'localhost:9222')

        # Hide the "controlled by automated software" traces so some sites
        # do not detect the automation.
        options.add_experimental_option('excludeSwitches', ['enable-automation'])
        browser = webdriver.Chrome(service=service, options=options)
        return browser

    def main(self, page):
        """Crawl pages self.page .. page (inclusive) for self.type.

        Rewritten from per-page tail recursion to a loop so long crawls
        cannot hit the recursion limit; the per-page browser is now closed
        in a finally block (the original leaked one Chrome per page).
        """
        while self.page <= page:
            browser = self.startBrowser()
            try:
                print("正在爬取页面路径：" + self.spiderUrl % (self.type, self.page))
                browser.get(self.spiderUrl % (self.type, self.page))
                time.sleep(5)  # wait for the job list to render
                job_list = browser.find_elements(by=By.XPATH, value='//ul[@class="job-list-box"]/li')
                for index, job in enumerate(job_list):
                    try:
                        print("正在爬取第%d个数据" % (index + 1))
                        self.save_to_csv(self._parse_job(job))
                    except Exception as e:
                        # A card whose layout does not match is skipped, but
                        # the error is no longer silently swallowed.
                        print("第%d个数据解析失败：%s" % (index + 1, e))
            finally:
                browser.quit()  # release the per-page browser
            self.page += 1
            time.sleep(1)  # small delay to avoid triggering anti-crawling

    def _parse_job(self, job):
        """Extract one job card element into the 13-column CSV row.

        Column order matches the header written by initCsv. Raises if a
        required sub-element is missing (caller skips that card).
        """
        # title
        title = job.find_element(by=By.XPATH,
                                 value='.//a[@class="job-card-left"]/div[contains(@class,"job-title")]/span[@class="job-name"]').text
        # address: "city·district·area" — keep only the city
        addresses = job.find_element(by=By.XPATH,
                                     value='.//a[@class="job-card-left"]/div[contains(@class,"job-title")]/span[@class="job-area-wrapper"]/span[@class="job-area"]').text.split(
            '·')
        city = addresses[0]

        # detailUrl
        detailUrl = job.find_element(by=By.XPATH, value='./div[1]/a').get_attribute('href')
        # renamed local (was `type`) to avoid shadowing the builtin
        job_type = self.type

        tag_list = job.find_elements(by=By.XPATH,
                                     value='.//a[@class="job-card-left"]/div[contains(@class,"job-info")]/ul/li')
        if len(tag_list) == 2:  # regular (non-intern) posting
            workExperience = tag_list[0].text
            educational = tag_list[1].text
        else:  # internship posting carries one extra leading tag
            workExperience = tag_list[1].text
            educational = tag_list[2].text

        # workTags: footer skill tags joined into one cell
        workTag_list = job.find_elements(by=By.XPATH,
                                         value='./div[contains(@class,"job-card-footer")]/ul[@class="tag-list"]/li')
        workTags = " / ".join(element.text for element in workTag_list)

        salaries = job.find_element(by=By.XPATH,
                                    value='.//a[@class="job-card-left"]/div[contains(@class,"job-info")]/span[@class="salary"]').text
        salary, salaryMonth = self._parse_salary(salaries)
        minSalary = salary[0]
        maxSalary = salary[1]

        # companyTitle
        companyTitle = job.find_element(by=By.XPATH,
                                        value='.//div[@class="job-card-right"]/div[@class="company-info"]/h3/a').text
        # imgSrc
        imgSrc = job.find_element(by=By.XPATH,
                                  value='.//div[@class="job-card-right"]/div[@class="company-logo"]/a/img').get_attribute(
            'src')

        # company tag list ("industry / funding / headcount")
        totalTags = job.find_elements(by=By.XPATH,
                                      value='.//div[@class="job-card-right"]/div[@class="company-info"]/ul[@class="company-tag-list"]/li')
        totalTag = ' / '.join(i.text for i in totalTags) or '无'
        nums = re.findall(r'\d+', totalTag)  # headcount figures as strings
        # Fall back to the minimum company size (10) when no figure is shown;
        # the original appended the raw list [10], writing "[10]" to the CSV.
        companyPeople = '-'.join(nums) if nums else '10'

        # welfare
        welfare = job.find_element(by=By.XPATH,
                                   value='./div[contains(@class,"job-card-footer")]/div[@class="info-desc"]').text
        if welfare == '':
            welfare = '福利少'
        if salaryMonth != '12薪':
            welfare += '，' + salaryMonth

        return [job_type, title, companyTitle, int(minSalary), int(maxSalary),
                workExperience, educational, detailUrl, companyPeople,
                workTags, welfare, imgSrc, city]

    @staticmethod
    def _parse_salary(salaries):
        """Parse a salary string into ([min, max], months-of-pay).

        'K' marks a regular position ('15-25K' or '15-25K·13薪', in CNY
        thousands/month); internships are quoted per day ('150-200元/天')
        or per month ('3000-5000元/月') and stored as-is.
        """
        if 'K' in salaries:  # regular position
            parts = salaries.split('·')  # optional year-end-bonus suffix
            salary = [int(x) * 1000 for x in parts[0].replace('K', '').split('-')]
            salaryMonth = '12薪' if len(parts) == 1 else parts[1]
        else:  # internship position
            unit = '元/天' if '元/天' in salaries else '元/月'
            salary = [int(x) for x in salaries.replace(unit, '').split('-')]
            salaryMonth = '12薪'
        return salary, salaryMonth

    def save_to_csv(self, rowData):
        """Append one row to ./jobData.csv."""
        with open('./jobData.csv', 'a', newline='', encoding='utf8') as wf:
            writer = csv.writer(wf)
            writer.writerow(rowData)

    def initCsv(self):
        """Create ./jobData.csv with the header row if it does not exist."""
        if not os.path.exists('./jobData.csv'):
            with open('./jobData.csv', 'w', newline='', encoding='utf8') as wf:
                writer = csv.writer(wf)
                # column order must match _parse_job's return list
                writer.writerow(
                    ['type', 'title', 'companyTitle', 'minSalary', 'maxSalary', 'workExperience',
                     'educational', 'detailUrl', 'companyPeople', 'workTag', 'welfare', 'imgSrc', 'city']
                )


"""
['数据采集', '性能测试', '爬虫工程师', 'SLAM算法', '数据开发', '自然语言处理算法', '前端开发工程师', 'iOS', 'PHP', '游戏测试', '售前技术支持', '实施工程师', 'C/C++', '鸿蒙开发工程师', '算法工程师', '实施顾问', '数据分析师', '自动化测试', '深度学习', '自动驾驶系统工程师', 'Android', '功能测试', 'Java', '架构师', '客户成功', '技术美术', '销售技术支持', '数据仓库', '风控算法', '硬件项目经理', '数据标注/AI训练师', '图像算法', '其他技术职位', '技术合伙人', '技术经理', 'GIS工程师', '技术文档工程师', '运维总监', '测试开发', 'Cocos', 'C#', '运维开发工程师', '系统安全', '系统管理员', '测试工程师', '技术总监', '规控算法', '网络工程师', '渗透测试', '项目经理/主管', '需求分析工程师', '网络安全', 'Node.js', 'DBA', '电脑/打印机维修', '项目专员', '软件测试', '算法研究员', '硬件测试', 'Golang', '项目助理', '数据挖掘', '全栈工程师', 'JavaScript', '大模型算法', '语音算法', '语音/视频/图形开发', '系统工程师', '其他后端开发', '数据治理', 'U3D', 'UE4', '运维工程师', '数据架构师', 'ETL工程师', 'IT技术支持', '机器学习', '区块链工程师', 'Python', '.NET', '测试经理', '搜索算法', '推荐算法', '售后技术支持', 'CTO/CIO']"""


if __name__ == '__main__':
    # Make sure the output CSV (with its header row) exists before any
    # rows are appended.
    spider('', 1).initCsv()
    # First column of jobUrl.csv holds the job keyword; set() removes
    # duplicates up front. (The original kept a redundant `already_exist`
    # list that never skipped anything — typeList was already unique and
    # the crawl ran unconditionally — so it is dropped here.)
    df = pd.read_csv('./jobUrl.csv')
    typeList = list(set(row[0] for row in df.values.tolist()))
    for job_type in typeList:  # renamed from `type` to avoid shadowing the builtin
        # Crawl pages 1..15 for every distinct keyword.
        spider(job_type, 1).main(15)
