import requests
import pandas as pd
import time
import random
from bs4 import BeautifulSoup
import datetime

class BossSpiderAPI:
    """Scrape job listings from Boss Zhipin's search API and export them to Excel.

    NOTE: the Cookie must be copied from a logged-in browser session on
    zhipin.com; the API rejects anonymous requests. The cookie below will
    expire and has to be refreshed manually.
    """

    # Seconds to wait for any HTTP response before giving up.
    # Without a timeout, requests can block forever on a stalled connection.
    REQUEST_TIMEOUT = 10

    def __init__(self, query="Python高级开发工程师", city="101220100",
                 experience="106", scale="303,304,305", page_size=30):
        """Build request headers and default search parameters.

        Args:
            query: Search keyword (default: senior Python developer).
            city: City code; 101220100 is the default city filter.
            experience: Experience filter code (106 = 3-5 years).
            scale: Comma-separated company-size codes (303,304,305 = 100-999 staff).
            page_size: Number of results requested per page.
        """
        self.base_url = "https://www.zhipin.com/wapi/zpgeek/search/joblist.json"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/134.0.0.0 Safari/537.36",
            "Referer": "https://www.zhipin.com/",
            "Cookie": "lastCity=101220100; __g=-; Hm_lvt_194df3105ad7148dcf2b98a91b5e727a=1744206399; HMACCOUNT=42C280446392FB0C; wt2=DlNySPpvwfKbhVut0Gr0mgKTqXmBwfqcUqRyST97FmpG1q4fuYfpOEhCiOr1dttN64ZIAMYJ2WlH238k6edxPQw~~; wbg=0; zp_at=5MyB3H6qawmfWoNUp314MzOYgaBoo-yV_45NFKxF4IU~; Hm_lpvt_194df3105ad7148dcf2b98a91b5e727a=1744208479; bst=V2QNMjEeP_0llgXdJtyh0QICmw7D7Rwg~~|QNMjEeP_0llgXdJtyh0QICmw7DvRwQ~~; __c=1744206399; __a=15610779.1712562134.1730179058.1744206399.26.3.12.26; __zp_stoken__=6578fRUnDnsOIwp3DiD42DRYRGxg9LUxJOzJLRTVDSUlFSUVLS0VJPSk9NTLDhl%2FDg8OSX8OcwrzCvUY2RUlFRUlDSURCJkU9wr5FSjA2w4Njw4rDkl%2FDjRjFhcOMHMKww4w3wpXDiRHCqMOIGFUvNyvDiEg9REjDssOLw6XDgcKOw4fDoMOIbcOLw6TCvT1MSMOJLkESaRxpQUxVW2sPWmFcYWVOFlNPVjpIRkpEccOML0sNHBgcGBYTDxMPExYaEQ0WEw8TDxcSDhIOPEXCnsOMWMK8w6DEosO5xKrCnk3CocK7xITCosKtZMKSwqXEgWPCtVhUwrPCrsK2wqbCrsKzXsKvw4tNwr7CpFjCqV%2FCrlFXw4hPHMKAd8OEw4p4WcK%2BVQ4ZwokPFEwVH1nDlw%3D%3D"
        }
        # Detail pages only need the UA + session cookie, not the Referer.
        self.detail_headers = {
            "User-Agent": self.headers["User-Agent"],
            "Cookie": self.headers["Cookie"]
        }
        self.params = {
            "scene": "1",
            "query": query,
            "city": city,
            "experience": experience,
            "scale": scale,
            "page": 1,
            "pageSize": page_size
        }
        # Accumulates one dict per scraped job posting.
        self.data_list = []

    def fetch_data(self, max_pages=3):
        """Fetch up to ``max_pages`` pages of search results into ``self.data_list``.

        Stops early when the API returns an empty job list. Pages that fail
        (network error, non-2xx status, or a non-JSON anti-bot response) are
        skipped instead of aborting the whole run.
        """
        for page in range(1, max_pages + 1):
            print(f"抓取第 {page} 页...")
            self.params['page'] = page
            try:
                resp = requests.get(self.base_url, headers=self.headers,
                                    params=self.params, timeout=self.REQUEST_TIMEOUT)
                resp.raise_for_status()
                # .json() inside the try: an HTML captcha/anti-bot page would
                # otherwise raise outside the handler and kill the run.
                result = resp.json()
            except (requests.RequestException, ValueError) as e:
                print(f"请求失败：{e}")
                continue

            job_list = result.get("zpData", {}).get("jobList", [])
            if not job_list:
                print("没有更多数据了")
                break

            for job in job_list:
                job_id = job.get("encryptJobId")
                # Skip the detail request entirely when the listing has no id.
                job_desc = self.get_job_detail(job_id) if job_id else ""

                # `or []` guards against the API returning an explicit null
                # (dict.get's default only covers a *missing* key).
                item = {
                    "职位": job.get("jobName"),
                    "公司": job.get("brandName"),
                    "薪资": job.get("salaryDesc"),
                    "地区": job.get("cityName"),
                    "经验": job.get("jobExperience"),
                    "学历": job.get("jobDegree"),
                    "公司规模": job.get("brandScaleName"),
                    "行业": job.get("brandIndustry"),
                    "福利标签": ",".join(job.get("welfareList") or []),
                    "技能标签": ",".join(job.get("skills") or []),
                    "职位描述": job_desc
                }
                self.data_list.append(item)
                # Randomized delay between detail fetches to reduce the chance
                # of triggering the site's rate limiting / bot detection.
                time.sleep(random.uniform(1, 1.5))

    def get_job_detail(self, job_id):
        """Return the plain-text job description for ``job_id``, or "" on any failure."""
        url = f"https://www.zhipin.com/job_detail/{job_id}.html"
        try:
            resp = requests.get(url, headers=self.detail_headers,
                                timeout=self.REQUEST_TIMEOUT)
            if resp.status_code != 200:
                return ""
            soup = BeautifulSoup(resp.text, 'html.parser')
            desc_tag = soup.select_one('.job-sec-text')
            return desc_tag.text.strip() if desc_tag else ""
        except Exception as e:
            # Best-effort: a missing description should not abort the crawl.
            print(f"详情页获取失败: {e}")
            return ""

    def save_excel(self):
        """Write all collected rows to a timestamped .xlsx file in the CWD."""
        filename = f"python岗位数据_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.xlsx"
        df = pd.DataFrame(self.data_list)
        df.to_excel(filename, index=False)
        # Bug fix: the message previously printed a literal placeholder
        # instead of the actual output filename.
        print(f"保存成功：{filename}，共 {len(df)} 条职位")

    def run(self):
        """Crawl with default paging, then export the results."""
        self.fetch_data()
        self.save_excel()

if __name__ == "__main__":
    # Script entry point: build the scraper and execute the full crawl + export.
    BossSpiderAPI().run()
