import re
from collections import Counter
from selenium import webdriver
from selenium.webdriver.chrome.service import Service as ChromeService
import time
from lxml import etree
import urllib.parse

# Path to the local ChromeDriver binary (relative to this project's layout).
driver_path = '../pythonProject1.0/requests/chromedriver.exe'
service = ChromeService(executable_path=driver_path)
# NOTE(review): the browser is launched here at import time as a module-level
# side effect — every importer of this module pays the startup cost; consider
# creating the driver lazily inside TrainSpider instead.
driver = webdriver.Chrome(service=service)

# Root URL of the target site; all page URLs below are built from it.
base_url = 'https://www.zhipin.com'

class TrainSpider:
    """Scrape job listings from zhipin.com with Selenium and summarise the
    most frequent requirements found in the collected job descriptions.

    Relies on the module-level ``driver`` (a Selenium WebDriver) and
    ``base_url`` globals defined at the top of this file.
    """

    login_url = f'{base_url}/web/user/?ka=header-login'  # login page URL
    # Class-level default kept for backward compatibility; __init__ shadows
    # it with a per-instance list (see below).
    job_descriptions = []

    def __init__(self):
        # Fix: the original stored descriptions in a mutable CLASS attribute,
        # so every TrainSpider instance appended into one shared list.
        self.job_descriptions = []

    def login(self):
        """Open the login page in the shared browser session.

        Actual credential entry is expected to be done manually (or added
        here later); failures are reported but deliberately not raised.
        """
        try:
            driver.get(self.login_url)
            print("登录页面已成功打开")
            # Add automated login steps here if/when needed.
        except Exception as e:
            # Fix: the original bare `except:` silently swallowed every
            # error (even KeyboardInterrupt); catch Exception and report it.
            print("等待！", e)

    def get_company_info(self):
        """Prompt for a profession, search it on zhipin.com, then visit each
        company in the result list, appending every job description to
        ``self.job_descriptions`` and writing a summary line per company to
        ``company_info.txt``. Returns early if the search page fails to load.
        """
        profession = input("请输入查询的职业：")
        encoded_profession = urllib.parse.quote(profession)  # URL-encode the query
        city_code = "101280100"  # fixed city code used by the site's search URL
        self.redirect_url = f"{base_url}/web/geek/job?query={encoded_profession}&city={city_code}"

        self.login()  # open the login page first, then jump to the results
        driver.get(self.redirect_url)
        time.sleep(5)  # crude wait for the page to finish loading

        # Verify the navigation actually landed on the search-result page.
        if driver.current_url == self.redirect_url:
            print("已成功跳转到指定页面")
        else:
            print(f"未跳转到指定页面，当前页面为：{driver.current_url}")
            return

        page_source = driver.page_source
        tree = etree.HTML(page_source)

        try:
            company_list = tree.xpath('//div[@class="search-job-result"]/ul/li')
            with open('../pythonProject1.0/requests/company_info.txt', 'w', encoding='utf-8') as fp:
                for li in company_list:
                    name_elements = li.xpath('./div[1]/a/div[1]/span[1]/text()')
                    link_elements = li.xpath('./div[1]/a/@href')

                    # Guard clause instead of a trailing else branch.
                    if not (name_elements and link_elements):
                        print("未找到公司名称或URL")
                        continue

                    company_name = name_elements[0]
                    company_url = f'{base_url}{link_elements[0]}'  # absolute company URL
                    print(f"Company Name: {company_name}, URL: {company_url}")

                    # Visit the company's page and pull the job description text.
                    driver.get(company_url)
                    time.sleep(5)  # crude wait for the page to load
                    job_tree = etree.HTML(driver.page_source)
                    # NOTE(review): absolute XPath is brittle against site
                    # layout changes — confirm it still matches the page.
                    job_description_elements = job_tree.xpath(
                        '/html/body/div[1]/div[2]/div[3]/div/div[2]/div[1]/div[3]//text()')
                    job_description = " ".join(
                        desc.strip() for desc in job_description_elements)
                    print(f"Job Description: {job_description}")
                    self.job_descriptions.append(job_description)
                    fp.write(f"Company Name: {company_name}, URL: {company_url}, Job Description: {job_description}\n")
        except Exception as e:
            print("获取公司信息失败：", e)

    def analyze_job_descriptions(self):
        """Extract requirement bullet points from the collected job
        descriptions and print the ten most common ones."""

        def extract_requirements(text):
            # Pull everything after a "要求:" / "要求：" marker, then split the
            # tail into individual phrases on Chinese punctuation.
            requirement_parts = re.findall(r'要求[：:](.*)', text)
            requirements = []
            for part in requirement_parts:
                requirements.extend(re.split(r'；|，|。', part))
            # Drop empties and strip leading "1、"-style numbering.
            return [re.sub(r'\d+、', '', req).strip()
                    for req in requirements if req.strip()]

        all_requirements = []
        for text in self.job_descriptions:
            all_requirements.extend(extract_requirements(text))

        # Frequency count across every description, top ten only.
        requirement_counts = Counter(all_requirements)
        most_common_requirements = requirement_counts.most_common(10)

        print("\n总结的十点相似要求：")
        # Fix: use enumerate's start parameter instead of i+1 arithmetic.
        for rank, (requirement, _) in enumerate(most_common_requirements, start=1):
            print(f"{rank}、{requirement}")

    def run(self):
        """Full pipeline: scrape company info (which performs the login step
        itself), analyse the descriptions, then keep the browser open."""
        # Fix: the original called self.login() here AND inside
        # get_company_info(), navigating to the login page twice.
        self.get_company_info()
        self.analyze_job_descriptions()

        print("任务完成。请手动关闭浏览器窗口。")
        # Fix: the original `while True: pass` busy-waited at 100% CPU;
        # sleeping keeps the window open without burning a core.
        while True:
            time.sleep(60)

def start():
    """Entry point: build a TrainSpider and execute its full pipeline."""
    TrainSpider().run()

if __name__ == '__main__':
    start()  # run the spider only when executed as a script, not on import
