import time
import json
import re
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
from webdriver_manager.chrome import ChromeDriverManager
import pdfkit

# 1. Configure Chrome to run headless (no visible browser window)
chrome_options = Options()
chrome_options.add_argument("--headless")

# 2. Initialise the WebDriver; webdriver_manager downloads a chromedriver
#    binary matching the installed Chrome automatically on first run.
service = Service(ChromeDriverManager().install())
driver = webdriver.Chrome(service=service, options=chrome_options)

try:
    # Target job posting on zhaopin.com (campus recruitment section).
    url = 'https://xiaoyuan.zhaopin.com/job/CC235435310J40670560311'
    driver.get(url)

    # Explicit wait: block until the job-title element has rendered (max 10 s),
    # so the JS-populated page source is complete before we read it.
    wait = WebDriverWait(driver, 10)
    wait.until(EC.presence_of_element_located((By.CLASS_NAME, 'job-banner__name')))

    # Grab the fully-rendered page source and parse it.
    html_content = driver.page_source
    soup = BeautifulSoup(html_content, 'html.parser')

    # Locate the <script> tag that embeds the page's initial JSON state.
    # Dots are escaped so the pattern matches the literal variable name
    # (unescaped '.' would match any character).
    script_tag = soup.find('script', string=re.compile(r'window\.__INITIAL_DATA__'))

    if script_tag and script_tag.string:
        # Preview part of the tag content for debugging.
        print("script 标签内容预览:", script_tag.string[:500])
        script_content = script_tag.string.strip()

        # Extract only the JSON payload that follows the assignment marker.
        marker = 'window.__INITIAL_DATA__ = '
        marker_pos = script_content.find(marker)
        if marker_pos == -1:
            # Previously an unchecked find() of -1 silently produced a bogus
            # slice; fail loudly instead.
            raise ValueError("window.__INITIAL_DATA__ assignment not found")
        # Strip surrounding whitespace and a possible trailing ';' — inline
        # scripts often terminate the assignment with one, which would make
        # json.loads raise.
        json_raw = script_content[marker_pos + len(marker):].strip().rstrip(';')
        json_data = json.loads(json_raw)
        print("解析后的 JSON 数据:" + json.dumps(json_data, ensure_ascii=False, indent=2))

        # Pull out the fields we care about; .get() keeps missing keys
        # non-fatal and yields "" instead of raising.
        position_detail = json_data["main"]["positionDetail"]
        company_detail = json_data["main"]["companyDetail"]
        extracted_data = {
            "positionName": position_detail.get("positionName", ""),
            "positionWorkCity": position_detail.get("positionWorkCity", ""),
            "salary": position_detail.get("salary60", ""),
            "jobDesc": position_detail.get("jobDesc", ""),
            "companyName": company_detail.get("companyName", ""),
            "companySize": company_detail.get("companySize", ""),
            "workingExp": position_detail.get("positionWorkingExp", ""),
        }

        # Save the extracted fields. This write now lives INSIDE the
        # if-block: previously it ran unconditionally and raised NameError
        # on 'extracted_data' whenever the script tag was not found.
        with open("../json/extracted_position.json", "w", encoding="utf-8") as f:
            json.dump(extracted_data, f, ensure_ascii=False, indent=2)

    # Build a minimal standalone HTML document holding the visible text of
    # the banner and main-body sections, then render it to PDF.
    html_str = """
    <!DOCTYPE html>
    <html lang="zh-cn">
    <head>
         <meta charset="utf-8">
         <title>Document</title>
    </head>
    <body>
    {content_html}
    </body>
    </html>
    """

    banner_div = soup.find('div', class_='job-banner')
    body_div = soup.find('div', class_='main-body__block')
    content_1 = banner_div.get_text(separator="\n") if banner_div else ''
    content_2 = body_div.get_text(separator="\n") if body_div else ''
    # Newlines become <br> so line structure survives in HTML.
    content_html = (content_1 + "\n" + content_2).replace("\n", "<br>\n")
    html = html_str.format(content_html=content_html)

    htmlPath = 'D:/project/其他/HTML/1.html'
    htmlPathPDF = 'D:/project/其他/HTML/1.pdf'
    with open(htmlPath, 'w', encoding='utf-8') as f:
        f.write(html)

    # Render the HTML file to PDF with wkhtmltopdf (images skipped).
    # NOTE(review): wkhtmltopdf path is machine-specific — confirm on deploy.
    config = pdfkit.configuration(wkhtmltopdf=r'D:/Work/pdf/wkhtmltopdf/bin/wkhtmltopdf.exe')
    pdfkit.from_file(htmlPath, htmlPathPDF, configuration=config, options={'no-images': ''})
finally:
    # Always release the browser process, even on failure.
    driver.quit()






