from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.edge.service import Service
from webdriver_manager.microsoft import EdgeChromiumDriverManager
from fake_useragent import UserAgent
from bs4 import BeautifulSoup
import time
import random
import pandas as pd
import json
import threading
from concurrent.futures import ThreadPoolExecutor


# Search-result URLs for liepin.com, one per industry sub-code (H0001..H0017,
# a few codes absent), all filtered to city/dq 410, 40 results per page.
# NOTE(review): the ckId/fkId/skId query parameters look like per-session
# tracking tokens captured from a live browser session — they may expire and
# need to be re-captured; verify before relying on these URLs long-term.
liepin_urls = [
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0001&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=f6tu9liog1eh84ziuo64kyzx21rpgl5a&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=f6tu9liog1eh84ziuo64kyzx21rpgl5a&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0002&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=w0o0wuh6uo5xkhv4r6w31i0k6koakk39&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=w0o0wuh6uo5xkhv4r6w31i0k6koakk39&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0003&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=2nolto3fck8eznkdkn3ifpkb0kfovgbb&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=2nolto3fck8eznkdkn3ifpkb0kfovgbb&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0004&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=v0mai0ik57wzuqff9omqwft2kta4o6rb&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=v0mai0ik57wzuqff9omqwft2kta4o6rb&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0005&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&ckId=n1jryf4j7ylbh17ddomtivfm2cfelpc8&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=n1jryf4j7ylbh17ddomtivfm2cfelpc8&sfrom=search_job_pc&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0006&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=2rb1qh3jfnhu8e3zmj3n79hhmklab2u8&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=2rb1qh3jfnhu8e3zmj3n79hhmklab2u8&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0007&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=8mgj5irn93o1hs1oqz2xmb3w5uwzr3qb&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=8mgj5irn93o1hs1oqz2xmb3w5uwzr3qb&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0008&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=8esiisqrz0jj6jbesx8h6hjma0zomneb&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=8esiisqrz0jj6jbesx8h6hjma0zomneb&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0009&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&ckId=ctj9v96544izcqok9vktz239aew9zip8&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=ctj9v96544izcqok9vktz239aew9zip8&sfrom=search_job_pc&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0010&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=052d4rtnbvx1jliqk2sbfx4zx4rp8gsa&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=052d4rtnbvx1jliqk2sbfx4zx4rp8gsa&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0011&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=xm7i3hyp6b3uqnipalvbl2hi87qnwzda&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=xm7i3hyp6b3uqnipalvbl2hi87qnwzda&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0012&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=xybn93vbeasui433mqyy812loiqdeuna&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=xybn93vbeasui433mqyy812loiqdeuna&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0013&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=44lyueyc2n0e5u4srqk4derqtdl036aa&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=44lyueyc2n0e5u4srqk4derqtdl036aa&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0015&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=v8afyms02eqy9dgf7sriko4t6o0487vb&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=v8afyms02eqy9dgf7sriko4t6o0487vb&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0016&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&ckId=yltreimpx7czi7nn7vgsj1tcsws2yr18&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=yltreimpx7czi7nn7vgsj1tcsws2yr18&sfrom=search_job_pc&suggestId=",
"https://www.liepin.com/zhaopin/?city=410&dq=410&pubTime=&currentPage=21&pageSize=40&key=&suggestTag=&workYearCode=&compId=&compName=&compTag=&industry=H01$H0017&salary=&jobKind=&compScale=&compKind=&compStage=&eduLevel=&otherCity=&sfrom=search_job_pc&ckId=2s5dlhjll86ixdzq5xpf0xlcc8lvvb48&scene=condition&skId=sq7s8hngg4tsy3vsc4894oobklycof28&fkId=2s5dlhjll86ixdzq5xpf0xlcc8lvvb48&suggestId=",
]

# Accumulator for every scraped job posting (one row each); appended to
# concurrently by the worker threads below.
job_data = pd.DataFrame(columns=['company_brief_address', 'company_detailed_address', 'hr_name', 'job_tags', 'job_title'
    , 'job_salary', 'company_name', 'company_intro', 'company_status', 'company_size', 'company_type', 'job_description'])

# NOTE(review): never used anywhere in this file — presumably left over from
# a hand-rolled threading approach before ThreadPoolExecutor was adopted.
threads = []

# Total number of postings scraped across all worker threads.
total_count = 0

# Consecutive failures to locate a job title on a detail page; used as a
# crude anti-scraping detector (see AccessWebsiteThread.access_website).
flag = 0


class AccessWebsiteThread:
    """Worker that scrapes liepin.com job postings reached from one search URL.

    Each instance drives its own Edge browser: it opens ``liepin_url``,
    injects previously saved login cookies, then visits every job-detail link
    on the search-result page and appends one row per posting to the shared
    module-level ``job_data`` DataFrame.
    """

    # Guards the module-level shared state (job_data, total_count, flag).
    # BUGFIX: these were mutated from several threads with no synchronization,
    # which can lose rows / counts (pandas DataFrames are not thread-safe).
    _lock = threading.Lock()

    def __init__(self, liepin_url):
        self.liepin_url = liepin_url  # search-result page assigned to this worker
        self.counter = 0              # postings scraped by this worker

    @staticmethod
    def _company_field(page_soup, label):
        """Return the text paired with *label* inside the company-other box, or "无".

        The detail page lists company facts as ``<span class="label">`` /
        ``<span class="text">`` sibling pairs; any lookup failure yields "无".
        """
        try:
            div_company_other = page_soup.find('div', class_='company-other')
            span_label = div_company_other.find('span', class_='label', string=label)
            return span_label.find_next_sibling('span', class_='text').text
        except Exception:
            return "无"

    def access_website(self):
        """Entry point: run the scrape and always release the browser.

        BUGFIX: the driver was never quit, leaking one browser process per
        worker thread; the try/finally guarantees cleanup even on errors.
        """
        # Create a fresh Edge browser instance for this worker.
        driver = webdriver.Edge()
        try:
            self._scrape(driver)
        finally:
            driver.quit()

    def _scrape(self, driver):
        """Scrape every job-detail page linked from the search-result page."""
        global flag, total_count

        driver.get(self.liepin_url)
        time.sleep(5)

        # Restore the logged-in session from cookies exported earlier.
        cookie_file_path = "cookies.json"
        with open(cookie_file_path, 'r', encoding='utf-8') as f:
            cookies = json.load(f)
        for cookie in cookies:
            driver.add_cookie(cookie)

        time.sleep(2)
        driver.refresh()
        time.sleep(5)  # let the page reload with the session applied

        soup = BeautifulSoup(driver.page_source, 'html.parser')

        # Each listing card sits in a div styled "margin-bottom: 10px;".
        for div in soup.find_all('div', style='margin-bottom: 10px;'):
            links = div.find_all('a', attrs={'data-nick': 'job-detail-job-info'})
            for url in [link.get('href') for link in links]:
                # Open the job-detail page and parse it.
                driver.get(url)
                time.sleep(random.randint(1, 2))  # crude rate limiting
                page_soup = BeautifulSoup(driver.page_source, 'html.parser')

                # Company brief address: first property span on the posting.
                try:
                    span = page_soup.find('div', class_='job-properties').find('span')
                    company_brief_address = span.text
                except Exception:
                    company_brief_address = "无"

                # Company detailed address: last label-box in company-other.
                try:
                    div_company_other = page_soup.find('div', class_='company-other')
                    last_box = div_company_other.find_all('div', class_='label-box')[-1]
                    company_detailed_address = last_box.find('span', class_='text').text
                except Exception:
                    company_detailed_address = "无"

                # HR name from the recruiter panel.
                try:
                    section = page_soup.find('section', class_='recruiter-container')
                    hr_name = section.find('div', class_='name-box').find('span', class_='name').text
                except Exception:
                    hr_name = "无"

                # Job tags come from the listing card, not the detail page.
                try:
                    job_tags = [span.text for span in div.find_all('span', class_='labels-tag')]
                except Exception:
                    job_tags = "无"

                # The job title doubles as the anti-scraping probe: more than
                # ten consecutive misses mean we are being served block pages.
                try:
                    job_title = page_soup.find('span', class_='job-title ellipsis-2').text
                    with self._lock:
                        flag = 0
                except Exception:
                    job_title = "无"
                    with self._lock:
                        flag += 1
                        blocked = flag > 10
                    if blocked:
                        print("网站已反爬")
                        return
                print(job_title)

                # Salary from the detail-page header.
                try:
                    div_name_box = page_soup.find('div', class_='name-box')
                    job_salary = div_name_box.find('span', class_='salary').text
                except Exception:
                    job_salary = "无"

                # Company name from the listing card.
                try:
                    company_name = div.find('span', class_='jsx-2693574896 company-name ellipsis-1').text
                except Exception:
                    company_name = "无"

                # Company introduction blurb.
                try:
                    section = page_soup.find('section', class_='company-intro-container')
                    company_intro = section.find('div', class_='inner ellipsis-3').text
                except Exception:
                    company_intro = "无"

                # Funding stage / headcount / industry share one HTML pattern.
                company_status = self._company_field(page_soup, '融资阶段：')
                company_size = self._company_field(page_soup, '人数规模：')
                company_type = self._company_field(page_soup, '企业行业：')

                # Job description = intro text + property tags + requirement tags.
                try:
                    job_description1 = page_soup.find('dd', attrs={'data-selector': 'job-intro-content'}).text
                except Exception:
                    job_description1 = "无"

                try:
                    spans = page_soup.find('div', class_='job-properties').find_all('span')
                    # Skip the first span (the address) and the visual separators.
                    job_description2 = [span.text for span in spans[1:] if 'split' not in span.get('class', [])]
                except Exception:
                    job_description2 = "无"
                job_description2 = "其他标签: " + ', '.join(job_description2)

                try:
                    job_description3 = [li.text for li in page_soup.find('div', class_='tag-box').find_all('li')]
                except Exception:
                    job_description3 = "无"
                job_description3 = "岗位要求： " + ', '.join(job_description3)

                job_description = job_description1 + job_description2 + job_description3

                # Append under the lock so len(job_data) pairs atomically with
                # the insert, and total_count stays accurate.
                with self._lock:
                    job_data.loc[len(job_data)] = [
                        company_brief_address, company_detailed_address, hr_name,
                        job_tags, job_title, job_salary, company_name,
                        company_intro, company_status, company_size,
                        company_type, job_description]
                    total_count += 1

                # BUGFIX: self.counter was previously incremented once per
                # call *after* the loops, so the 800-posting cap could never
                # trigger; count each posting and stop at the cap instead.
                self.counter += 1
                if self.counter >= 800:
                    print(f"+---------{self.liepin_url}线程已爬取800条数据，终止---------+")
                    return


# Fan the search URLs out to a small pool of worker threads.
# BUGFIX: the futures returned by submit() were previously discarded, so any
# exception raised inside a worker was silently swallowed by the executor;
# collect them and call result() so failures surface here.
with ThreadPoolExecutor(max_workers=3) as executor:
    futures = [executor.submit(AccessWebsiteThread(liepin_url).access_website)
               for liepin_url in liepin_urls]
    for future in futures:
        try:
            future.result()
        except Exception as e:
            print(f"线程异常: {e}")

print(f"爬取完成，已爬取{total_count}")
# Persist all scraped rows to CSV.
job_data.to_csv('job_data2.csv', index=False, encoding='utf_8')
